Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jun 2020 20:25:53 +0000 (13:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jun 2020 20:25:53 +0000 (13:25 -0700)
Merge some more updates from Andrew Morton:

 - various hotfixes and minor things

 - hch's use_mm/unuse_mm cleanups

Subsystems affected by this patch series: mm/hugetlb, scripts, kcov,
lib, nilfs, checkpatch, lib, mm/debug, ocfs2, lib, misc.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kernel: set USER_DS in kthread_use_mm
  kernel: better document the use_mm/unuse_mm API contract
  kernel: move use_mm/unuse_mm to kthread.c
  stacktrace: cleanup inconsistent variable type
  lib: test get_count_order/long in test_bitops.c
  mm: add comments on pglist_data zones
  ocfs2: fix spelling mistake and grammar
  mm/debug_vm_pgtable: fix kernel crash by checking for THP support
  lib: fix bitmap_parse() on 64-bit big endian archs
  checkpatch: correct check for kernel parameters doc
  nilfs2: fix null pointer dereference at nilfs_segctor_do_construct()
  lib/lz4/lz4_decompress.c: document deliberate use of `&'
  kcov: check kcov_softirq in kcov_remote_stop()
  scripts/spelling: add a few more typos
  khugepaged: selftests: fix timeout condition in wait_for_scan()

445 files changed:
Documentation/COPYING-logo
Documentation/admin-guide/LSM/tomoyo.rst
Documentation/admin-guide/acpi/initrd_table_override.rst
Documentation/admin-guide/bcache.rst
Documentation/admin-guide/devices.rst
Documentation/admin-guide/initrd.rst
Documentation/admin-guide/md.rst
Documentation/admin-guide/mono.rst
Documentation/admin-guide/reporting-bugs.rst
Documentation/admin-guide/unicode.rst
Documentation/conf.py
Documentation/core-api/rbtree.rst
Documentation/dev-tools/coccinelle.rst
Documentation/dev-tools/gdb-kernel-debugging.rst
Documentation/devicetree/bindings/input/elants_i2c.txt [deleted file]
Documentation/devicetree/bindings/input/iqs269a.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/msm-vibrator.txt [deleted file]
Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/mms114.txt
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt [deleted file]
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
Documentation/devicetree/bindings/mtd/partition.txt
Documentation/devicetree/bindings/opp/opp.txt
Documentation/devicetree/bindings/property-units.txt
Documentation/devicetree/bindings/sound/tdm-slot.txt
Documentation/doc-guide/parse-headers.rst
Documentation/driver-api/acpi/linuxized-acpica.rst
Documentation/driver-api/driver-model/devres.rst
Documentation/driver-api/mtdnand.rst
Documentation/driver-api/usb/bulk-streams.rst
Documentation/driver-api/usb/writing_musb_glue_layer.rst
Documentation/filesystems/locking.rst
Documentation/filesystems/path-lookup.txt
Documentation/filesystems/seq_file.rst
Documentation/misc-devices/c2port.txt
Documentation/process/3.Early-stage.rst
Documentation/process/7.AdvancedTopics.rst
Documentation/process/8.Conclusion.rst
Documentation/process/adding-syscalls.rst
Documentation/process/applying-patches.rst
Documentation/process/changes.rst
Documentation/process/volatile-considered-harmful.rst
Documentation/security/SCTP.rst
Documentation/sphinx/kfigure.py
Documentation/static-keys.txt
Documentation/trace/events-msr.rst
Documentation/trace/mmiotrace.rst
Documentation/translations/it_IT/kernel-hacking/hacking.rst
Documentation/translations/it_IT/process/email-clients.rst
Documentation/translations/it_IT/process/management-style.rst
Documentation/translations/ko_KR/memory-barriers.txt
Documentation/translations/zh_CN/filesystems/debugfs.rst
Documentation/vm/ksm.rst
Documentation/xz.txt
MAINTAINERS
arch/alpha/kernel/setup.c
arch/arm/crypto/Kconfig
arch/arm64/Kconfig
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/barrier.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/vdso32/Makefile
arch/arm64/lib/csum.c
arch/m68k/coldfire/pci.c
arch/m68k/configs/stmark2_defconfig
arch/m68k/include/asm/uaccess_no.h
arch/powerpc/mm/pgtable.c
arch/riscv/Kconfig
arch/riscv/include/asm/clocksource.h [new file with mode: 0644]
arch/riscv/include/asm/irq.h
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/smp.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/asm/vdso/clocksource.h [new file with mode: 0644]
arch/riscv/include/asm/vdso/gettimeofday.h [new file with mode: 0644]
arch/riscv/include/asm/vdso/processor.h [new file with mode: 0644]
arch/riscv/include/asm/vdso/vsyscall.h [new file with mode: 0644]
arch/riscv/kernel/cpu.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/irq.c
arch/riscv/kernel/patch.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/clock_getres.S [deleted file]
arch/riscv/kernel/vdso/clock_gettime.S [deleted file]
arch/riscv/kernel/vdso/gettimeofday.S [deleted file]
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/kernel/vdso/vgettimeofday.c [new file with mode: 0644]
arch/riscv/mm/init.c
arch/um/drivers/vector_kern.h
arch/um/drivers/vector_user.c
arch/um/drivers/vhost_user.h
arch/um/drivers/virtio_uml.c
arch/um/os-Linux/file.c
arch/x86/include/asm/pgtable_32.h
arch/x86/kernel/sys_ia32.c
arch/x86/kvm/hyperv.c
crypto/Kconfig
drivers/acpi/acpica/dbdisply.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/device_pm.c
drivers/acpi/numa/srat.c
drivers/acpi/scan.c
drivers/cdrom/cdrom.c
drivers/char/random.c
drivers/clocksource/timer-riscv.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/tegra186-cpufreq.c
drivers/crypto/cavium/nitrox/nitrox_main.c
drivers/crypto/omap-aes-gcm.c
drivers/crypto/omap-aes.c
drivers/crypto/omap-crypto.c
drivers/crypto/omap-sham.c
drivers/crypto/virtio/virtio_crypto_algs.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_query.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/sun4i/sun4i_hdmi.h
drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
drivers/hid/hid-alps.c
drivers/hid/hid-multitouch.c
drivers/input/evdev.c
drivers/input/joystick/Kconfig
drivers/input/keyboard/Kconfig
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/imx_sc_key.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/gp2ap002a00f.c [deleted file]
drivers/input/misc/iqs269a.c [new file with mode: 0644]
drivers/input/misc/msm-vibrator.c [deleted file]
drivers/input/misc/xen-kbdfront.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/cy8ctma140.c [new file with mode: 0644]
drivers/input/touchscreen/cyttsp4_core.c
drivers/input/touchscreen/cyttsp_core.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/melfas_mip4.c
drivers/input/touchscreen/mms114.c
drivers/input/touchscreen/raspberrypi-ts.c
drivers/input/touchscreen/stmfts.c
drivers/interconnect/core.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-riscv-intc.c [new file with mode: 0644]
drivers/irqchip/irq-sifive-plic.c
drivers/mailbox/Kconfig
drivers/mailbox/Makefile
drivers/mailbox/imx-mailbox.c
drivers/mailbox/pcc.c
drivers/mailbox/qcom-apcs-ipc-mailbox.c
drivers/mailbox/qcom-ipcc.c [new file with mode: 0644]
drivers/mailbox/sprd-mailbox.c [new file with mode: 0644]
drivers/mailbox/zynqmp-ipi-mailbox.c
drivers/misc/mic/Kconfig
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/devices/docg3.c
drivers/mtd/maps/physmap-gemini.c
drivers/mtd/mtdblock.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/Makefile
drivers/mtd/nand/raw/ams-delta.c
drivers/mtd/nand/raw/arasan-nand-controller.c [new file with mode: 0644]
drivers/mtd/nand/raw/atmel/nand-controller.c
drivers/mtd/nand/raw/au1550nd.c
drivers/mtd/nand/raw/bcm47xxnflash/main.c
drivers/mtd/nand/raw/brcmnand/brcmnand.c
drivers/mtd/nand/raw/cadence-nand-controller.c
drivers/mtd/nand/raw/cafe_nand.c
drivers/mtd/nand/raw/cmx270_nand.c [deleted file]
drivers/mtd/nand/raw/cs553x_nand.c
drivers/mtd/nand/raw/davinci_nand.c
drivers/mtd/nand/raw/denali.c
drivers/mtd/nand/raw/diskonchip.c
drivers/mtd/nand/raw/fsl_elbc_nand.c
drivers/mtd/nand/raw/fsl_ifc_nand.c
drivers/mtd/nand/raw/fsl_upm.c
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/gpio.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/hisi504_nand.c
drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
drivers/mtd/nand/raw/internals.h
drivers/mtd/nand/raw/lpc32xx_mlc.c
drivers/mtd/nand/raw/lpc32xx_slc.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/meson_nand.c
drivers/mtd/nand/raw/mpc5121_nfc.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/mxic_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_bch.c
drivers/mtd/nand/raw/nand_jedec.c
drivers/mtd/nand/raw/nand_legacy.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/nand/raw/nand_onfi.c
drivers/mtd/nand/raw/nand_timings.c
drivers/mtd/nand/raw/nand_toshiba.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/ndfc.c
drivers/mtd/nand/raw/omap2.c
drivers/mtd/nand/raw/omap_elm.c
drivers/mtd/nand/raw/orion_nand.c
drivers/mtd/nand/raw/oxnas_nand.c
drivers/mtd/nand/raw/pasemi_nand.c
drivers/mtd/nand/raw/plat_nand.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/nand/raw/r852.c
drivers/mtd/nand/raw/s3c2410.c
drivers/mtd/nand/raw/sh_flctl.c
drivers/mtd/nand/raw/sharpsl.c
drivers/mtd/nand/raw/socrates_nand.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/nand/raw/sunxi_nand.c
drivers/mtd/nand/raw/tango_nand.c
drivers/mtd/nand/raw/tegra_nand.c
drivers/mtd/nand/raw/tmio_nand.c
drivers/mtd/nand/raw/txx9ndfmc.c
drivers/mtd/nand/raw/vf610_nfc.c
drivers/mtd/nand/raw/xway_nand.c
drivers/mtd/parsers/cmdlinepart.c
drivers/mtd/parsers/ofpart.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/controllers/Kconfig
drivers/mtd/spi-nor/controllers/aspeed-smc.c
drivers/mtd/spi-nor/controllers/hisi-sfc.c
drivers/mtd/spi-nor/controllers/nxp-spifi.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/macronix.c
drivers/mtd/spi-nor/micron-st.c
drivers/mtd/spi-nor/sfdp.c
drivers/mtd/spi-nor/sfdp.h
drivers/mtd/spi-nor/spansion.c
drivers/mtd/spi-nor/winbond.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/fastmap-wl.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/wl.c
drivers/net/caif/Kconfig
drivers/opp/core.c
drivers/opp/debugfs.c
drivers/opp/of.c
drivers/opp/opp.h
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/vdpa/Kconfig
drivers/vdpa/ifcvf/ifcvf_base.c
drivers/vdpa/ifcvf/ifcvf_base.h
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vhost/Kconfig
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/test.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vringh.c
drivers/vhost/vsock.c
drivers/virtio/Kconfig
drivers/virtio/Makefile
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mem.c [new file with mode: 0644]
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_modern.c
drivers/xen/time.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/binfmt_flat.c
fs/eventfd.c
fs/locks.c
fs/namespace.c
fs/nfs/direct.c
fs/nfs/dns_resolve.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/nfs/nfstrace.h
fs/nfs/pagelist.c
fs/nfs/read.c
fs/nfs/sysfs.h
fs/nfsd/cache.h
fs/nfsd/netns.h
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfscache.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nfsd/nfssvc.c
fs/nfsd/state.h
fs/nfsd/trace.h
fs/proc/proc_sysctl.c
fs/proc/root.c
fs/select.c
fs/super.c
fs/xfs/xfs_ioctl.c
include/acpi/acpixf.h
include/acpi/actypes.h
include/asm-generic/barrier.h
include/dt-bindings/mailbox/qcom-ipcc.h [new file with mode: 0644]
include/linux/bch.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_types.h
include/linux/cpufreq.h
include/linux/cpuhotplug.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/input/gp2ap002a00f.h [deleted file]
include/linux/input/mt.h
include/linux/interconnect.h
include/linux/kthread.h
include/linux/memory_hotplug.h
include/linux/mtd/bbm.h
include/linux/mtd/cfi.h
include/linux/mtd/mtd.h
include/linux/mtd/partitions.h
include/linux/mtd/qinfo.h
include/linux/mtd/rawnand.h
include/linux/mtd/spi-nor.h
include/linux/nfs4.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/page-flags.h
include/linux/platform_data/mtd-davinci.h
include/linux/platform_data/mtd-nand-s3c2410.h
include/linux/pm_opp.h
include/linux/regset.h
include/linux/sunrpc/auth.h
include/linux/sunrpc/gss_api.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/svcauth_gss.h
include/linux/sunrpc/svcsock.h
include/linux/vdpa.h
include/linux/vringh.h
include/net/neighbour.h
include/trace/events/rpcgss.h
include/trace/events/rpcrdma.h
include/trace/events/sunrpc.h
include/uapi/linux/vhost.h
include/uapi/linux/virtio_ids.h
include/uapi/linux/virtio_mem.h [new file with mode: 0644]
include/uapi/linux/virtio_ring.h
include/uapi/mtd/mtd-abi.h
init/Kconfig
kernel/bpf/syscall.c
kernel/gcov/Kconfig
kernel/gcov/Makefile
kernel/gcov/gcc_3_4.c [deleted file]
kernel/kthread.c
kernel/power/swap.c
kernel/scs.c
kernel/trace/trace_stack.c
lib/bch.c
lib/fault-inject.c
mm/memory_hotplug.c
mm/page_alloc.c
mm/page_isolation.c
net/core/sysctl_net_core.c
net/netfilter/core.c
net/sunrpc/addr.c
net/sunrpc/auth.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_mech_switch.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/auth_gss/trace.c
net/sunrpc/clnt.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sunrpc.h
net/sunrpc/sunrpc_syms.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth.c
net/sunrpc/svcauth_unix.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtsock.c
net/tls/tls_main.c
scripts/gcc-plugins/Kconfig
scripts/kernel-doc
sound/core/pcm_native.c
sound/pci/emu10k1/emu10k1x.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98390.c
sound/soc/codecs/max98390.h
sound/soc/codecs/rl6231.c
sound/soc/codecs/rt5645.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/glk_rt5682_max98357a.c
sound/soc/intel/boards/kbl_da7219_max98927.c
sound/soc/intel/boards/kbl_rt5663_max98927.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/meson/axg-fifo.c
sound/soc/meson/meson-card-utils.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/soc/sof/nocodec.c
sound/usb/card.c
sound/usb/quirks-table.h
sound/usb/usbaudio.h
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c

index 296f0f7..b21c7cf 100644 (file)
@@ -9,5 +9,5 @@ scale down to smaller sizes and are better for letterheads or whatever
 you want to use it for: for the full range of logos take a look at
 Larry's web-page:
 
-       http://www.isc.tamu.edu/~lewing/linux/
+       https://www.isc.tamu.edu/~lewing/linux/
 
index e2d6b6e..4bc9c2b 100644 (file)
@@ -27,29 +27,29 @@ Where is documentation?
 =======================
 
 User <-> Kernel interface documentation is available at
-http://tomoyo.osdn.jp/2.5/policy-specification/index.html .
+https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
 
 Materials we prepared for seminars and symposiums are available at
-http://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
 Below lists are chosen from three aspects.
 
 What is TOMOYO?
   TOMOYO Linux Overview
-    http://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+    https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
   TOMOYO Linux: pragmatic and manageable security for Linux
-    http://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+    https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
   TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
-    http://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+    https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
 
 What can TOMOYO do?
   Deep inside TOMOYO Linux
-    http://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+    https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
   The role of "pathname based access control" in security.
-    http://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+    https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
 
 History of TOMOYO?
   Realities of Mainlining
-    http://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
+    https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
 
 What is future plan?
 ====================
index cbd7682..bb24fa6 100644 (file)
@@ -102,7 +102,7 @@ Where to retrieve userspace tools
 =================================
 
 iasl and acpixtract are part of Intel's ACPICA project:
-http://acpica.org/
+https://acpica.org/
 
 and should be packaged by distributions (for example in the acpica package
 on SUSE).
index c0ce64d..1eccf95 100644 (file)
@@ -7,9 +7,9 @@ nice if you could use them as cache... Hence bcache.
 
 Wiki and git repositories are at:
 
-  - http://bcache.evilpiepirate.org
+  - https://bcache.evilpiepirate.org
   - http://evilpiepirate.org/git/linux-bcache.git
-  - http://evilpiepirate.org/git/bcache-tools.git
+  - https://evilpiepirate.org/git/bcache-tools.git
 
 It's designed around the performance characteristics of SSDs - it only allocates
 in erase block sized buckets, and it uses a hybrid btree/log to track cached
index d41671a..035275f 100644 (file)
@@ -17,7 +17,7 @@ Specifically explore the sections titled "CHAR and MISC DRIVERS", and
 to involve for character and block devices.
 
 This document is included by reference into the Filesystem Hierarchy
-Standard (FHS).         The FHS is available from http://www.pathname.com/fhs/.
+Standard (FHS).         The FHS is available from https://www.pathname.com/fhs/.
 
 Allocations marked (68k/Amiga) apply to Linux/68k on the Amiga
 platform only. Allocations marked (68k/Atari) apply to Linux/68k on
index a03daba..67bbad8 100644 (file)
@@ -376,7 +376,7 @@ Resources
 ---------
 
 .. [#f1] Almesberger, Werner; "Booting Linux: The History and the Future"
-    http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
+    https://www.almesberger.net/cv/papers/ols2k-9.ps.gz
 .. [#f2] newlib package (experimental), with initrd example
     https://www.sourceware.org/newlib/
 .. [#f3] util-linux: Miscellaneous utilities for Linux
index 3c51084..d973d46 100644 (file)
@@ -5,7 +5,7 @@ Boot time assembly of RAID arrays
 ---------------------------------
 
 Tools that manage md devices can be found at
-   http://www.kernel.org/pub/linux/utils/raid/
+   https://www.kernel.org/pub/linux/utils/raid/
 
 
 You can boot with your md device with the following kernel command
index 59e6d59..c6dab56 100644 (file)
@@ -12,11 +12,11 @@ other program after you have done the following:
    a binary package, a source tarball or by installing from Git. Binary
    packages for several distributions can be found at:
 
-       http://www.mono-project.com/download/
+       https://www.mono-project.com/download/
 
    Instructions for compiling Mono can be found at:
 
-       http://www.mono-project.com/docs/compiling-mono/linux/
+       https://www.mono-project.com/docs/compiling-mono/linux/
 
    Once the Mono CLR support has been installed, just check that
    ``/usr/bin/mono`` (which could be located elsewhere, for example
index 49ac8dc..42481ea 100644 (file)
@@ -75,7 +75,7 @@ Tips for reporting bugs
 
 If you haven't reported a bug before, please read:
 
-       http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
+       https://www.chiark.greenend.org.uk/~sgtatham/bugs.html
 
        http://www.catb.org/esr/faqs/smart-questions.html
 
index 7425a33..290fe83 100644 (file)
@@ -114,7 +114,7 @@ Unicode practice.
   This range is now officially managed by the ConScript Unicode
   Registry.  The normative reference is at:
 
-       http://www.evertype.com/standards/csur/klingon.html
+       https://www.evertype.com/standards/csur/klingon.html
 
 Klingon has an alphabet of 26 characters, a positional numeric writing
 system with 10 digits, and is written left-to-right, top-to-bottom.
@@ -178,7 +178,7 @@ fictional and artificial scripts has been established by John Cowan
 <jcowan@reutershealth.com> and Michael Everson <everson@evertype.com>.
 The ConScript Unicode Registry is accessible at:
 
-         http://www.evertype.com/standards/csur/
+         https://www.evertype.com/standards/csur/
 
 The ranges used fall at the low end of the End User Zone and can hence
 not be normatively assigned, but it is recommended that people who
index f6a1bc0..c503188 100644 (file)
@@ -538,7 +538,7 @@ epub_exclude_files = ['search.html']
 # Grouping the document tree into PDF files. List of tuples
 # (source start file, target name, title, author, options).
 #
-# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
+# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
 #
 # FIXME: Do not add the index file here; the result will be too big. Adding
 # multiple PDF files here actually tries to get the cross-referencing right
index 523d54b..6b88837 100644 (file)
@@ -36,10 +36,10 @@ This document covers use of the Linux rbtree implementation.  For more
 information on the nature and implementation of Red Black Trees,  see:
 
   Linux Weekly News article on red-black trees
-    http://lwn.net/Articles/184495/
+    https://lwn.net/Articles/184495/
 
   Wikipedia entry on red-black trees
-    http://en.wikipedia.org/wiki/Red-black_tree
+    https://en.wikipedia.org/wiki/Red-black_tree
 
 Linux implementation of red-black trees
 ---------------------------------------
index 00a3409..70274c3 100644 (file)
@@ -14,7 +14,7 @@ many uses in kernel development, including the application of complex,
 tree-wide patches and detection of problematic programming patterns.
 
 Getting Coccinelle
--------------------
+------------------
 
 The semantic patches included in the kernel use features and options
 which are provided by Coccinelle version 1.0.0-rc11 and above.
@@ -56,7 +56,7 @@ found at:
 https://github.com/coccinelle/coccinelle/blob/master/install.txt
 
 Supplemental documentation
----------------------------
+--------------------------
 
 For supplemental documentation refer to the wiki:
 
@@ -128,7 +128,7 @@ To enable verbose messages set the V= variable, for example::
    make coccicheck MODE=report V=1
 
 Coccinelle parallelization
----------------------------
+--------------------------
 
 By default, coccicheck tries to run as parallel as possible. To change
 the parallelism, set the J= variable. For example, to run across 4 CPUs::
@@ -333,7 +333,7 @@ as an example if requiring at least Coccinelle >= 1.0.5::
        // Requires: 1.0.5
 
 Proposing new semantic patches
--------------------------------
+------------------------------
 
 New semantic patches can be proposed and submitted by kernel
 developers. For sake of clarity, they should be organized in the
index 19df792..4756f6b 100644 (file)
@@ -24,7 +24,7 @@ Setup
 
 - Create a virtual Linux machine for QEMU/KVM (see www.linux-kvm.org and
   www.qemu.org for more details). For cross-development,
-  http://landley.net/aboriginal/bin keeps a pool of machine images and
+  https://landley.net/aboriginal/bin keeps a pool of machine images and
   toolchains that can be helpful to start from.
 
 - Build the kernel with CONFIG_GDB_SCRIPTS enabled, but leave
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
deleted file mode 100644 (file)
index 5edac8b..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-Elantech I2C Touchscreen
-
-Required properties:
-- compatible: must be "elan,ekth3500".
-- reg: I2C address of the chip.
-- interrupts: interrupt to which the chip is connected (see interrupt
-  binding[0]).
-
-Optional properties:
-- wakeup-source: touchscreen can be used as a wakeup source.
-- pinctrl-names: should be "default" (see pinctrl binding [1]).
-- pinctrl-0: a phandle pointing to the pin settings for the device (see
-  pinctrl binding [1]).
-- reset-gpios: reset gpio the chip is connected to.
-- vcc33-supply: a phandle for the regulator supplying 3.3V power.
-- vccio-supply: a phandle for the regulator supplying IO power.
-
-[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
-Example:
-       &i2c1 {
-               /* ... */
-
-               touchscreen@10 {
-                       compatible = "elan,ekth3500";
-                       reg = <0x10>;
-                       interrupt-parent = <&gpio4>;
-                       interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
-                       wakeup-source;
-               };
-
-               /* ... */
-       };
diff --git a/Documentation/devicetree/bindings/input/iqs269a.yaml b/Documentation/devicetree/bindings/input/iqs269a.yaml
new file mode 100644 (file)
index 0000000..f0242bb
--- /dev/null
@@ -0,0 +1,581 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/iqs269a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Azoteq IQS269A Capacitive Touch Controller
+
+maintainers:
+  - Jeff LaBundy <jeff@labundy.com>
+
+description: |
+  The Azoteq IQS269A is an 8-channel capacitive touch controller that features
+  additional Hall-effect and inductive sensing capabilities.
+
+  Link to datasheet: https://www.azoteq.com/
+
+properties:
+  compatible:
+    const: azoteq,iqs269a
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  azoteq,hall-enable:
+    type: boolean
+    description:
+      Enables Hall-effect sensing on channels 6 and 7. In this case, keycodes
+      assigned to channel 6 are ignored and keycodes assigned to channel 7 are
+      interpreted as switch codes. Refer to the datasheet for requirements im-
+      posed on channels 6 and 7 by Hall-effect sensing.
+
+  azoteq,suspend-mode:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description: |
+      Specifies the power mode during suspend as follows:
+      0: Automatic (same as normal runtime, i.e. suspend/resume disabled)
+      1: Low power (all sensing at a reduced reporting rate)
+      2: Ultra-low power (channel 0 proximity sensing)
+      3: Halt (no sensing)
+
+  azoteq,clk-div:
+    type: boolean
+    description: Divides the device's core clock by a factor of 4.
+
+  azoteq,ulp-update:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - minimum: 0
+        maximum: 7
+        default: 3
+    description: Specifies the ultra-low-power mode update rate.
+
+  azoteq,reseed-offset:
+    type: boolean
+    description:
+      Applies an 8-count offset to all long-term averages upon either ATI or
+      reseed events.
+
+  azoteq,filt-str-lp-lta:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description:
+      Specifies the long-term average filter strength during low-power mode.
+
+  azoteq,filt-str-lp-cnt:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description:
+      Specifies the raw count filter strength during low-power mode.
+
+  azoteq,filt-str-np-lta:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description:
+      Specifies the long-term average filter strength during normal-power mode.
+
+  azoteq,filt-str-np-cnt:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description:
+      Specifies the raw count filter strength during normal-power mode.
+
+  azoteq,rate-np-ms:
+    minimum: 0
+    maximum: 255
+    default: 16
+    description: Specifies the report rate (in ms) during normal-power mode.
+
+  azoteq,rate-lp-ms:
+    minimum: 0
+    maximum: 255
+    default: 160
+    description: Specifies the report rate (in ms) during low-power mode.
+
+  azoteq,rate-ulp-ms:
+    multipleOf: 16
+    minimum: 0
+    maximum: 4080
+    default: 160
+    description: Specifies the report rate (in ms) during ultra-low-power mode.
+
+  azoteq,timeout-pwr-ms:
+    multipleOf: 512
+    minimum: 0
+    maximum: 130560
+    default: 2560
+    description:
+      Specifies the length of time (in ms) to wait for an event during normal-
+      power mode before transitioning to low-power mode.
+
+  azoteq,timeout-lta-ms:
+    multipleOf: 512
+    minimum: 0
+    maximum: 130560
+    default: 32768
+    description:
+      Specifies the length of time (in ms) to wait before resetting the long-
+      term average of all channels. Specify the maximum timeout to disable it
+      altogether.
+
+  azoteq,ati-band-disable:
+    type: boolean
+    description: Disables the ATI band check.
+
+  azoteq,ati-lp-only:
+    type: boolean
+    description: Limits automatic ATI to low-power mode.
+
+  azoteq,ati-band-tighten:
+    type: boolean
+    description: Tightens the ATI band from 1/8 to 1/16 of the desired target.
+
+  azoteq,filt-disable:
+    type: boolean
+    description: Disables all raw count filtering.
+
+  azoteq,gpio3-select:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - minimum: 0
+        maximum: 7
+        default: 0
+    description:
+      Selects the channel for which the GPIO3 pin represents touch state.
+
+  azoteq,dual-direction:
+    type: boolean
+    description:
+      Specifies that long-term averages are to freeze in the presence of either
+      increasing or decreasing counts, thereby permitting events to be reported
+      in either direction.
+
+  azoteq,tx-freq:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description: |
+      Specifies the inductive sensing excitation frequency as follows (paren-
+      thesized numbers represent the frequency if 'azoteq,clk-div' is present):
+      0: 16 MHz (4 MHz)
+      1: 8 MHz (2 MHz)
+      2: 4 MHz (1 MHz)
+      3: 2 MHz (500 kHz)
+
+  azoteq,global-cap-increase:
+    type: boolean
+    description: Increases the global capacitance adder from 0.5 pF to 1.5 pF.
+
+  azoteq,reseed-select:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 0
+    description: |
+      Specifies the event(s) that prompt the device to reseed (i.e. reset the
+      long-term average of) an associated channel as follows:
+      0: None
+      1: Proximity
+      2: Proximity or touch
+      3: Proximity, touch or deep touch
+
+  azoteq,tracking-enable:
+    type: boolean
+    description:
+      Enables all associated channels to track their respective reference
+      channels.
+
+  azoteq,filt-str-slider:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - enum: [0, 1, 2, 3]
+        default: 1
+    description: Specifies the slider coordinate filter strength.
+
+patternProperties:
+  "^channel@[0-7]$":
+    type: object
+    description:
+      Represents a single sensing channel. A channel is active if defined and
+      inactive otherwise.
+
+    properties:
+      reg:
+        minimum: 0
+        maximum: 7
+        description: Index of the channel.
+
+      azoteq,reseed-disable:
+        type: boolean
+        description:
+          Prevents the channel from being reseeded if the long-term average
+          timeout (defined in 'azoteq,timeout-lta-ms') expires.
+
+      azoteq,blocking-enable:
+        type: boolean
+        description: Specifies that the channel is a blocking channel.
+
+      azoteq,slider0-select:
+        type: boolean
+        description: Specifies that the channel participates in slider 0.
+
+      azoteq,slider1-select:
+        type: boolean
+        description: Specifies that the channel participates in slider 1.
+
+      azoteq,rx-enable:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32-array
+          - minItems: 1
+            maxItems: 8
+            items:
+              minimum: 0
+              maximum: 7
+        description:
+          Specifies the CRX pin(s) associated with the channel. By default, only
+          the CRX pin corresponding to the channel's index is enabled (e.g. CRX0
+          for channel 0).
+
+      azoteq,tx-enable:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32-array
+          - minItems: 1
+            maxItems: 8
+            items:
+              minimum: 0
+              maximum: 7
+            default: [0, 1, 2, 3, 4, 5, 6, 7]
+        description: Specifies the TX pin(s) associated with the channel.
+
+      azoteq,meas-cap-decrease:
+        type: boolean
+        description:
+          Decreases the internal measurement capacitance from 60 pF to 15 pF.
+
+      azoteq,rx-float-inactive:
+        type: boolean
+        description: Floats any inactive CRX pins instead of grounding them.
+
+      azoteq,local-cap-size:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [0, 1, 2]
+            default: 0
+        description: |
+          Specifies the capacitance to be added to the channel as follows:
+          0: None
+          1: Global adder (based on 'azoteq,global-cap-increase')
+          2: Global adder + 0.5 pF
+
+      azoteq,invert-enable:
+        type: boolean
+        description:
+          Inverts the polarity of the states reported for proximity, touch and
+          deep-touch events relative to their respective thresholds.
+
+      azoteq,proj-bias:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [0, 1, 2, 3]
+            default: 2
+        description: |
+          Specifies the bias current applied during projected-capacitance
+          sensing as follows:
+          0: 2.5 uA
+          1: 5 uA
+          2: 10 uA
+          3: 20 uA
+
+      azoteq,sense-mode:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [0, 1, 9, 14, 15]
+            default: 0
+        description: |
+          Specifies the channel's sensing mode as follows:
+          0:  Self capacitance
+          1:  Projected capacitance
+          9:  Self or mutual inductance
+          14: Hall effect
+          15: Temperature
+
+      azoteq,sense-freq:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [0, 1, 2, 3]
+            default: 1
+        description: |
+          Specifies the channel's sensing frequency as follows (parenthesized
+          numbers represent the frequency if 'azoteq,clk-div' is present):
+          0: 4 MHz (1 MHz)
+          1: 2 MHz (500 kHz)
+          2: 1 MHz (250 kHz)
+          3: 500 kHz (125 kHz)
+
+      azoteq,static-enable:
+        type: boolean
+        description: Enables the static front-end for the channel.
+
+      azoteq,ati-mode:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [0, 1, 2, 3]
+            default: 3
+        description: |
+          Specifies the channel's ATI mode as follows:
+          0: Disabled
+          1: Semi-partial
+          2: Partial
+          3: Full
+
+      azoteq,ati-base:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - enum: [75, 100, 150, 200]
+            default: 100
+        description: Specifies the channel's ATI base.
+
+      azoteq,ati-target:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - multipleOf: 32
+            minimum: 0
+            maximum: 2016
+            default: 512
+        description: Specifies the channel's ATI target.
+
+      azoteq,assoc-select:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32-array
+          - minItems: 1
+            maxItems: 8
+            items:
+              minimum: 0
+              maximum: 7
+        description:
+          Specifies the associated channels for which the channel serves as a
+          reference channel. By default, no channels are selected.
+
+      azoteq,assoc-weight:
+        allOf:
+          - $ref: /schemas/types.yaml#/definitions/uint32
+          - minimum: 0
+            maximum: 255
+            default: 0
+        description:
+          Specifies the channel's impact weight if it acts as an associated
+          channel (0 = 0% impact, 255 = 200% impact).
+
+    patternProperties:
+      "^event-prox(-alt)?$":
+        type: object
+        description:
+          Represents a proximity event reported by the channel in response to
+          a decrease in counts. Node names suffixed with '-alt' instead corre-
+          spond to an increase in counts.
+
+          By default, the long-term average tracks an increase in counts such
+          that only events corresponding to a decrease in counts are reported
+          (refer to the datasheet for more information).
+
+          Specify 'azoteq,dual-direction' to freeze the long-term average when
+          the counts increase or decrease such that events of either direction
+          can be reported. Alternatively, specify 'azoteq,invert-enable' to in-
+          vert the polarity of the states reported by the channel.
+
+          Complementary events (e.g. event-touch and event-touch-alt) can both
+          be present and specify different key or switch codes, but not differ-
+          ent thresholds or hysteresis (if applicable).
+
+        properties:
+          azoteq,thresh:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+              - minimum: 0
+                maximum: 255
+                default: 10
+            description: Specifies the threshold for the event.
+
+          linux,code:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description: Numeric key or switch code associated with the event.
+
+        additionalProperties: false
+
+      "^event-touch(-alt)?$":
+        type: object
+        description: Represents a touch event reported by the channel.
+
+        properties:
+          azoteq,thresh:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+              - minimum: 0
+                maximum: 255
+                default: 8
+            description: Specifies the threshold for the event.
+
+          azoteq,hyst:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+              - minimum: 0
+                maximum: 15
+                default: 4
+            description: Specifies the hysteresis for the event.
+
+          linux,code:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description: Numeric key or switch code associated with the event.
+
+        additionalProperties: false
+
+      "^event-deep(-alt)?$":
+        type: object
+        description: Represents a deep-touch event reported by the channel.
+
+        properties:
+          azoteq,thresh:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+              - minimum: 0
+                maximum: 255
+                default: 26
+            description: Specifies the threshold for the event.
+
+          azoteq,hyst:
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
+              - minimum: 0
+                maximum: 15
+                default: 0
+            description: Specifies the hysteresis for the event.
+
+          linux,code:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description: Numeric key or switch code associated with the event.
+
+        additionalProperties: false
+
+    required:
+      - reg
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - "#address-cells"
+  - "#size-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/input/input.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            iqs269a@44 {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+
+                    compatible = "azoteq,iqs269a";
+                    reg = <0x44>;
+                    interrupt-parent = <&gpio>;
+                    interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+                    azoteq,hall-enable;
+                    azoteq,suspend-mode = <2>;
+
+                    channel@0 {
+                            reg = <0x0>;
+
+                            event-prox {
+                                    linux,code = <KEY_POWER>;
+                            };
+                    };
+
+                    channel@1 {
+                            reg = <0x1>;
+                            azoteq,slider0-select;
+                    };
+
+                    channel@2 {
+                            reg = <0x2>;
+                            azoteq,slider0-select;
+                    };
+
+                    channel@3 {
+                            reg = <0x3>;
+                            azoteq,slider0-select;
+                    };
+
+                    channel@4 {
+                            reg = <0x4>;
+                            azoteq,slider0-select;
+                    };
+
+                    channel@5 {
+                            reg = <0x5>;
+                            azoteq,slider0-select;
+                    };
+
+                    channel@6 {
+                            reg = <0x6>;
+                            azoteq,invert-enable;
+                            azoteq,static-enable;
+                            azoteq,reseed-disable;
+                            azoteq,rx-enable = <0>;
+                            azoteq,sense-freq = <0x0>;
+                            azoteq,sense-mode = <0xE>;
+                            azoteq,ati-mode = <0x0>;
+                            azoteq,ati-base = <200>;
+                            azoteq,ati-target = <320>;
+                    };
+
+                    channel@7 {
+                            reg = <0x7>;
+                            azoteq,invert-enable;
+                            azoteq,static-enable;
+                            azoteq,reseed-disable;
+                            azoteq,rx-enable = <0>, <6>;
+                            azoteq,sense-freq = <0x0>;
+                            azoteq,sense-mode = <0xE>;
+                            azoteq,ati-mode = <0x3>;
+                            azoteq,ati-base = <200>;
+                            azoteq,ati-target = <320>;
+
+                            event-touch {
+                                    linux,code = <SW_LID>;
+                            };
+                    };
+            };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/input/msm-vibrator.txt b/Documentation/devicetree/bindings/input/msm-vibrator.txt
deleted file mode 100644 (file)
index 8dcf014..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-* Device tree bindings for the Qualcomm MSM vibrator
-
-Required properties:
-
-  - compatible: Should be one of
-               "qcom,msm8226-vibrator"
-               "qcom,msm8974-vibrator"
-  - reg: the base address and length of the IO memory for the registers.
-  - pinctrl-names: set to default.
-  - pinctrl-0: phandles pointing to pin configuration nodes. See
-               Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-  - clock-names: set to pwm
-  - clocks: phandle of the clock. See
-            Documentation/devicetree/bindings/clock/clock-bindings.txt
-  - enable-gpios: GPIO that enables the vibrator.
-
-Optional properties:
-
-  - vcc-supply: phandle to the regulator that provides power to the sensor.
-
-Example from a LG Nexus 5 (hammerhead) phone:
-
-vibrator@fd8c3450 {
-       reg = <0xfd8c3450 0x400>;
-       compatible = "qcom,msm8974-vibrator";
-
-       vcc-supply = <&pm8941_l19>;
-
-       clocks = <&mmcc CAMSS_GP1_CLK>;
-       clock-names = "pwm";
-
-       enable-gpios = <&msmgpio 60 GPIO_ACTIVE_HIGH>;
-
-       pinctrl-names = "default";
-       pinctrl-0 = <&vibrator_pin>;
-};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml
new file mode 100644 (file)
index 0000000..8c73e52
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma140.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cypress CY8CTMA140 series touchscreen controller bindings
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    const: cypress,cy8ctma140
+
+  reg:
+    const: 0x20
+
+  clock-frequency:
+    description: I2C client clock frequency, defined for host
+    minimum: 100000
+    maximum: 400000
+
+  interrupts:
+    maxItems: 1
+
+  vcpin-supply:
+    description: Analog power supply regulator on VCPIN pin
+
+  vdd-supply:
+    description: Digital power supply regulator on VDD pin
+
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-swapped-x-y: true
+  touchscreen-max-pressure: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - touchscreen-size-x
+  - touchscreen-size-y
+  - touchscreen-max-pressure
+
+examples:
+- |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      touchscreen@20 {
+        compatible = "cypress,cy8ctma140";
+        reg = <0x20>;
+        touchscreen-size-x = <480>;
+        touchscreen-size-y = <800>;
+        touchscreen-max-pressure = <255>;
+        interrupt-parent = <&gpio6>;
+        interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+        vdd-supply = <&ab8500_ldo_aux2_reg>;
+        vcpin-supply = <&ab8500_ldo_aux2_reg>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
new file mode 100644 (file)
index 0000000..a792d63
--- /dev/null
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/input/touchscreen/elan,elants_i2c.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Elantech I2C Touchscreen
+
+maintainers:
+  - David Heidelberg <david@ixit.cz>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    enum:
+      - elan,ektf3624
+      - elan,ekth3500
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  wakeup-source:
+    type: boolean
+    description: touchscreen can be used as a wakeup source.
+
+  reset-gpios:
+    description: reset gpio the chip is connected to.
+
+  vcc33-supply:
+    description: a phandle for the regulator supplying 3.3V power.
+
+  vccio-supply:
+    description: a phandle for the regulator supplying IO power.
+
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        touchscreen@10 {
+            compatible = "elan,ekth3500";
+            reg = <0x10>;
+
+            interrupt-parent = <&gpio4>;
+            interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+            wakeup-source;
+        };
+    };
index 2cd9540..707234c 100644 (file)
@@ -1,9 +1,10 @@
-* MELFAS MMS114/MMS152 touchscreen controller
+* MELFAS MMS114/MMS152/MMS345L touchscreen controller
 
 Required properties:
 - compatible: should be one of:
        - "melfas,mms114"
        - "melfas,mms152"
+       - "melfas,mms345l"
 - reg: I2C address of the chip
 - interrupts: interrupt to which the chip is connected
 - touchscreen-size-x: See [1]
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
deleted file mode 100644 (file)
index beec612..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-Binding for the Qualcomm APCS global block
-==========================================
-
-This binding describes the APCS "global" block found in various Qualcomm
-platforms.
-
-- compatible:
-       Usage: required
-       Value type: <string>
-       Definition: must be one of:
-                   "qcom,msm8916-apcs-kpss-global",
-                   "qcom,msm8996-apcs-hmss-global"
-                   "qcom,msm8998-apcs-hmss-global"
-                   "qcom,qcs404-apcs-apps-global"
-                   "qcom,sc7180-apss-shared"
-                   "qcom,sdm845-apss-shared"
-                   "qcom,sm8150-apss-shared"
-                   "qcom,ipq8074-apcs-apps-global"
-
-- reg:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition: must specify the base address and size of the global block
-
-- clocks:
-       Usage: required if #clock-names property is present
-       Value type: <phandle array>
-       Definition: phandles to the two parent clocks of the clock driver.
-
-- #mbox-cells:
-       Usage: required
-       Value type: <u32>
-       Definition: as described in mailbox.txt, must be 1
-
-- #clock-cells:
-       Usage: optional
-       Value type: <u32>
-       Definition: as described in clock.txt, must be 0
-
-- clock-names:
-       Usage: required if the platform data based clock driver needs to
-       retrieve the parent clock names from device tree.
-       This will requires two mandatory clocks to be defined.
-       Value type: <string-array>
-       Definition: must be "pll" and "aux"
-
-= EXAMPLE
-The following example describes the APCS HMSS found in MSM8996 and part of the
-GLINK RPM referencing the "rpm_hlos" doorbell therein.
-
-       apcs_glb: mailbox@9820000 {
-               compatible = "qcom,msm8996-apcs-hmss-global";
-               reg = <0x9820000 0x1000>;
-
-               #mbox-cells = <1>;
-       };
-
-       rpm-glink {
-               compatible = "qcom,glink-rpm";
-
-               interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
-
-               qcom,rpm-msg-ram = <&rpm_msg_ram>;
-
-               mboxes = <&apcs_glb 0>;
-               mbox-names = "rpm_hlos";
-       };
-
-Below is another example of the APCS binding on MSM8916 platforms:
-
-       apcs: mailbox@b011000 {
-               compatible = "qcom,msm8916-apcs-kpss-global";
-               reg = <0xb011000 0x1000>;
-               #mbox-cells = <1>;
-               clocks = <&a53pll>;
-               #clock-cells = <0>;
-       };
-
-Below is another example of the APCS binding on QCS404 platforms:
-
-       apcs_glb: mailbox@b011000 {
-               compatible = "qcom,qcs404-apcs-apps-global", "syscon";
-               reg = <0x0b011000 0x1000>;
-               #mbox-cells = <1>;
-               clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
-               clock-names = "pll", "aux";
-               #clock-cells = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
new file mode 100644 (file)
index 0000000..12eff94
--- /dev/null
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/qcom,apcs-kpss-global.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm APCS global block bindings
+
+description:
+  This binding describes the APCS "global" block found in various Qualcomm
+  platforms.
+
+maintainers:
+  - Sivaprakash Murugesan <sivaprak@codeaurora.org>
+
+properties:
+  compatible:
+    enum:
+      - qcom,ipq8074-apcs-apps-global
+      - qcom,msm8916-apcs-kpss-global
+      - qcom,msm8996-apcs-hmss-global
+      - qcom,msm8998-apcs-hmss-global
+      - qcom,qcs404-apcs-apps-global
+      - qcom,sc7180-apss-shared
+      - qcom,sdm845-apss-shared
+      - qcom,sm8150-apss-shared
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    description: phandles to the parent clocks of the clock driver
+    items:
+      - description: primary pll parent of the clock driver
+      - description: auxiliary parent
+
+  '#mbox-cells':
+    const: 1
+
+  '#clock-cells':
+    const: 0
+
+  clock-names:
+    items:
+      - const: pll
+      - const: aux
+
+required:
+  - compatible
+  - reg
+  - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+
+  # Example apcs with msm8996
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    apcs_glb: mailbox@9820000 {
+        compatible = "qcom,msm8996-apcs-hmss-global";
+        reg = <0x9820000 0x1000>;
+
+        #mbox-cells = <1>;
+    };
+
+    rpm-glink {
+        compatible = "qcom,glink-rpm";
+        interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+        qcom,rpm-msg-ram = <&rpm_msg_ram>;
+        mboxes = <&apcs_glb 0>;
+        mbox-names = "rpm_hlos";
+    };
+
+  # Example apcs with qcs404
+  - |
+    #define GCC_APSS_AHB_CLK_SRC  1
+    #define GCC_GPLL0_AO_OUT_MAIN 123
+    apcs: mailbox@b011000 {
+        compatible = "qcom,qcs404-apcs-apps-global";
+        reg = <0x0b011000 0x1000>;
+        #mbox-cells = <1>;
+        clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+        clock-names = "pll", "aux";
+        #clock-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
new file mode 100644 (file)
index 0000000..4ac2123
--- /dev/null
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/qcom-ipcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+
+maintainers:
+  - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+  The Inter-Processor Communication Controller (IPCC) is a centralized hardware
+  to route interrupts across various subsystems. It involves a three-level
+  addressing scheme called protocol, client and signal. For example, consider an
+  entity on the Application Processor Subsystem (APSS) that wants to listen to
+  Modem's interrupts via Shared Memory Point to Point (SMP2P) interface. In such
+  a case, the client would be Modem (client-id is 2) and the signal would be
+  SMP2P (signal-id is 2). The SMP2P itself falls under the Multiprocessor (MPROC)
+  protocol (protocol-id is 0). Refer include/dt-bindings/mailbox/qcom-ipcc.h
+  for the list of such IDs.
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - qcom,sm8250-ipcc
+      - const: qcom,ipcc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  interrupt-controller: true
+
+  "#interrupt-cells":
+    const: 3
+    description:
+      The first cell is the client-id, the second cell is the signal-id and the
+      third cell is the interrupt type.
+
+  "#mbox-cells":
+    const: 2
+    description:
+      The first cell is the client-id, and the second cell is the signal-id.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-controller
+  - "#interrupt-cells"
+  - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+        #include <dt-bindings/interrupt-controller/arm-gic.h>
+        #include <dt-bindings/mailbox/qcom-ipcc.h>
+
+        mailbox@408000 {
+                compatible = "qcom,sm8250-ipcc", "qcom,ipcc";
+                reg = <0x408000 0x1000>;
+                interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+                interrupt-controller;
+                #interrupt-cells = <3>;
+                #mbox-cells = <2>;
+        };
+
+        smp2p-modem {
+                compatible = "qcom,smp2p";
+                interrupts-extended = <&ipcc_mproc IPCC_CLIENT_MPSS
+                                IPCC_MPROC_SIGNAL_SMP2P IRQ_TYPE_EDGE_RISING>;
+                mboxes = <&ipcc_mproc IPCC_CLIENT_MPSS IPCC_MPROC_SIGNAL_SMP2P>;
+
+                /* Other SMP2P fields */
+        };
diff --git a/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml
new file mode 100644 (file)
index 0000000..0f7451b
--- /dev/null
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/sprd-mailbox.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum mailbox controller bindings
+
+maintainers:
+  - Orson Zhai <orsonzhai@gmail.com>
+  - Baolin Wang <baolin.wang7@gmail.com>
+  - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+  compatible:
+    enum:
+      - sprd,sc9860-mailbox
+
+  reg:
+    items:
+      - description: inbox registers' base address
+      - description: outbox registers' base address
+
+  interrupts:
+    items:
+      - description: inbox interrupt
+      - description: outbox interrupt
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: enable
+
+  "#mbox-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - "#mbox-cells"
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    mailbox: mailbox@400a0000 {
+      compatible = "sprd,sc9860-mailbox";
+      reg = <0 0x400a0000 0 0x8000>, <0 0x400a8000 0 0x8000>;
+      #mbox-cells = <1>;
+      clock-names = "enable";
+      clocks = <&aon_gate 53>;
+      interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
new file mode 100644 (file)
index 0000000..db8f115
--- /dev/null
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/arasan,nand-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arasan NAND Flash Controller with ONFI 3.1 support device tree bindings
+
+allOf:
+  - $ref: "nand-controller.yaml"
+
+maintainers:
+  - Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+        - enum:
+          - xlnx,zynqmp-nand-controller
+        - enum:
+          - arasan,nfc-v3p10
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Controller clock
+      - description: NAND bus clock
+
+  clock-names:
+    items:
+      - const: controller
+      - const: bus
+
+  interrupts:
+    maxItems: 1
+
+  "#address-cells": true
+  "#size-cells": true
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - interrupts
+
+additionalProperties: true
+
+examples:
+  - |
+    nfc: nand-controller@ff100000 {
+        compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10";
+        reg = <0x0 0xff100000 0x0 0x1000>;
+        clock-names = "controller", "bus";
+        clocks = <&clk200>, <&clk100>;
+        interrupt-parent = <&gic>;
+        interrupts = <0 14 4>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+    };
index 05651a6..44335a4 100644 (file)
@@ -20,6 +20,8 @@ Required properties:
                      "brcm,brcmnand" and an appropriate version compatibility
                      string, like "brcm,brcmnand-v7.0"
                      Possible values:
+                         brcm,brcmnand-v2.1
+                         brcm,brcmnand-v2.2
                          brcm,brcmnand-v4.0
                          brcm,brcmnand-v5.0
                          brcm,brcmnand-v6.0
index afbbd87..4a39698 100644 (file)
@@ -61,6 +61,9 @@ Optional properties:
   clobbered.
 - lock : Do not unlock the partition at initialization time (not supported on
   all devices)
+- slc-mode: This parameter, if present, allows one to emulate SLC mode on a
+  partition attached to an MLC NAND thus making this partition immune to
+  paired-pages corruptions
 
 Examples:
 
index 6859227..9d16d41 100644 (file)
@@ -83,9 +83,14 @@ properties.
 
 Required properties:
 - opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer. This is a
-  required property for all device nodes but devices like power domains. The
-  power domain nodes must have another (implementation dependent) property which
-  uniquely identifies the OPP nodes.
+  required property for all device nodes, unless another "required" property to
+  uniquely identify the OPP nodes exists. Devices like power domains must have
+  another (implementation dependent) property.
+
+- opp-peak-kBps: Peak bandwidth in kilobytes per second, expressed as an array
+  of 32-bit big-endian integers. Each element of the array represents the
+  peak bandwidth value of each interconnect path. The number of elements should
+  match the number of interconnect paths.
 
 Optional properties:
 - opp-microvolt: voltage in micro Volts.
@@ -132,6 +137,12 @@ Optional properties:
 - opp-level: A value representing the performance level of the device,
   expressed as a 32-bit integer.
 
+- opp-avg-kBps: Average bandwidth in kilobytes per second, expressed as an array
+  of 32-bit big-endian integers. Each element of the array represents the
+  average bandwidth value of each interconnect path. The number of elements
+  should match the number of interconnect paths. This property is only
+  meaningful in OPP tables where opp-peak-kBps is present.
+
 - clock-latency-ns: Specifies the maximum possible transition latency (in
   nanoseconds) for switching to this OPP from any other OPP.
 
index e9b8360..c80a110 100644 (file)
@@ -41,3 +41,7 @@ Temperature
 Pressure
 ----------------------------------------
 -kpascal       : kilopascal
+
+Throughput
+----------------------------------------
+-kBps          : kilobytes per second
index 34cf70e..4bb513a 100644 (file)
@@ -14,8 +14,8 @@ For instance:
        dai-tdm-slot-tx-mask = <0 1>;
        dai-tdm-slot-rx-mask = <1 0>;
 
-And for each spcified driver, there could be one .of_xlate_tdm_slot_mask()
-to specify a explicit mapping of the channels and the slots. If it's absent
+And for each specified driver, there could be one .of_xlate_tdm_slot_mask()
+to specify an explicit mapping of the channels and the slots. If it's absent
 the default snd_soc_of_xlate_tdm_slot_mask() will be used to generate the
 tx and rx masks.
 
index f7135b0..5da0046 100644 (file)
@@ -186,7 +186,7 @@ COPYRIGHT
 
 Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
-License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
 
 This is free software: you are free to change and redistribute it.
 There is NO WARRANTY, to the extent permitted by law.
index 0ca8f15..6bee033 100644 (file)
@@ -175,9 +175,9 @@ illustrated in the following figure::
     B. acpica / master - "master" branch of the git repository at
        <https://github.com/acpica/acpica.git>.
     C. linux-pm / linux-next - "linux-next" branch of the git repository at
-       <http://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git>.
+       <https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git>.
     D. linux / master - "master" branch of the git repository at
-       <http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git>.
+       <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git>.
 
    Before the linuxized ACPICA patches are sent to the Linux ACPI community
    for review, there is a quality assurance build test process to reduce
@@ -274,6 +274,6 @@ before they become available from the ACPICA release process.
    a diff file indicating the state of the current divergences::
 
    # git clone https://github.com/acpica/acpica
-   # git clone http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+   # git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
    # cd acpica
    # generate/linux/divergences.sh -s ../linux
index 6440783..e0b58c3 100644 (file)
@@ -314,6 +314,7 @@ IOMAP
   devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
   devm_platform_ioremap_resource_wc()
   devm_platform_ioremap_resource_byname()
+  devm_platform_get_and_ioremap_resource()
   devm_iounmap()
   pcim_iomap()
   pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
index 5544765..0bf8d6e 100644 (file)
@@ -276,8 +276,10 @@ unregisters the partitions in the MTD layer.
     #ifdef MODULE
     static void __exit board_cleanup (void)
     {
-        /* Release resources, unregister device */
-        nand_release (mtd_to_nand(board_mtd));
+        /* Unregister device */
+        WARN_ON(mtd_device_unregister(board_mtd));
+        /* Release resources */
+        nand_cleanup(mtd_to_nand(board_mtd));
 
         /* unmap physical address */
         iounmap(baseaddr);
index 99b515b..eeefe58 100644 (file)
@@ -9,9 +9,9 @@ device driver to overload a bulk endpoint so that multiple transfers can be
 queued at once.
 
 Streams are defined in sections 4.4.6.4 and 8.12.1.4 of the Universal Serial Bus
-3.0 specification at http://www.usb.org/developers/docs/  The USB Attached SCSI
+3.0 specification at https://www.usb.org/developers/docs/  The USB Attached SCSI
 Protocol, which uses streams to queue multiple SCSI commands, can be found on
-the T10 website (http://t10.org/).
+the T10 website (https://t10.org/).
 
 
 Device-side implications
index 5bf7152..10416cc 100644 (file)
@@ -707,12 +707,12 @@ cheerful guidance and support.
 Resources
 =========
 
-USB Home Page: http://www.usb.org
+USB Home Page: https://www.usb.org
 
-linux-usb Mailing List Archives: http://marc.info/?l=linux-usb
+linux-usb Mailing List Archives: https://marc.info/?l=linux-usb
 
 USB On-the-Go Basics:
-http://www.maximintegrated.com/app-notes/index.mvp/id/1822
+https://www.maximintegrated.com/app-notes/index.mvp/id/1822
 
 :ref:`Writing USB Device Drivers <writing-usb-driver>`
 
index eb71156..318605d 100644 (file)
@@ -429,6 +429,7 @@ prototypes::
        int (*lm_grant)(struct file_lock *, struct file_lock *, int);
        void (*lm_break)(struct file_lock *); /* break_lease callback */
        int (*lm_change)(struct file_lock **, int);
+       bool (*lm_breaker_owns_lease)(struct file_lock *);
 
 locking rules:
 
@@ -439,6 +440,7 @@ lm_notify:          yes             yes                     no
 lm_grant:              no              no                      no
 lm_break:              yes             no                      no
 lm_change              yes             no                      no
+lm_breaker_owns_lease: no              no                      no
 ==========             =============   =================       =========
 
 buffer_head
index 9b8930f..1aa7ce0 100644 (file)
@@ -375,7 +375,7 @@ common path elements, the more likely they will exist in dentry cache.
 Papers and other documentation on dcache locking
 ================================================
 
-1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
+1. Scaling dcache with RCU (https://linuxjournal.com/article.php?sid=7124).
 
 2. http://lse.sourceforge.net/locking/dcache/dcache.html
 
index fab3020..7f7ee06 100644 (file)
@@ -7,7 +7,7 @@ The seq_file Interface
        Copyright 2003 Jonathan Corbet <corbet@lwn.net>
 
        This file is originally from the LWN.net Driver Porting series at
-       http://lwn.net/Articles/driver-porting/
+       https://lwn.net/Articles/driver-porting/
 
 
 There are numerous ways for a device driver (or other kernel component) to
@@ -57,7 +57,7 @@ Then concatenate the output files out1 and out2 and get the right
 result. Yes, it is a thoroughly useless module, but the point is to show
 how the mechanism works without getting lost in other details.  (Those
 wanting to see the full source for this module can find it at
-http://lwn.net/Articles/22359/).
+https://lwn.net/Articles/22359/).
 
 Deprecated create_proc_entry
 ============================
index ea73444..31351b1 100644 (file)
@@ -28,14 +28,14 @@ where the micro controller is connected via special GPIOs pins.
 References
 ----------
 
-The C2 Interface main references are at (http://www.silabs.com)
+The C2 Interface main references are at the Silicon Laboratories site
 (https://www.silabs.com); see:
 
 - AN127: FLASH Programming via the C2 Interface at
-http://www.silabs.com/Support Documents/TechnicalDocs/an127.pdf 
+https://www.silabs.com/Support%20Documents/TechnicalDocs/an127.pdf
 
 - C2 Specification at
-http://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults
+https://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults
 
 however it implements a two wire serial communication protocol (bit
 banging) designed to enable in-system programming, debugging, and
index be00716..6bfd60d 100644 (file)
@@ -46,7 +46,7 @@ and posted this:
        to communicate user requirements to these people is a waste of
        time. They are much too "intelligent" to listen to lesser mortals.
 
-(http://lwn.net/Articles/131776/).
+(https://lwn.net/Articles/131776/).
 
 The reality of the situation was different; the kernel developers were far
 more concerned about system stability, long-term maintenance, and finding
@@ -216,7 +216,7 @@ a non-disclosure agreement.  The Linux Foundation operates an NDA program
 designed to help with this sort of situation; more information can be found
 at:
 
-    http://www.linuxfoundation.org/en/NDA_program
+    https://www.linuxfoundation.org/nda/
 
 This kind of review is often enough to avoid serious problems later on
 without requiring public disclosure of the project.
index 172733c..bf7cbfb 100644 (file)
@@ -29,9 +29,9 @@ long document in its own right.  Instead, the focus here will be on how git
 fits into the kernel development process in particular.  Developers who
 wish to come up to speed with git will find more information at:
 
-       http://git-scm.com/
+       https://git-scm.com/
 
-       http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+       https://www.kernel.org/pub/software/scm/git/docs/user-manual.html
 
 and on various tutorials found on the web.
 
@@ -55,7 +55,7 @@ server with git-daemon is relatively straightforward if you have a system
 which is accessible to the Internet.  Otherwise, free, public hosting sites
 (Github, for example) are starting to appear on the net.  Established
 developers can get an account on kernel.org, but those are not easy to come
-by; see http://kernel.org/faq/ for more information.
+by; see https://kernel.org/faq/ for more information.
 
 The normal git workflow involves the use of a lot of branches.  Each line
 of development can be separated into a separate "topic branch" and
@@ -125,7 +125,7 @@ can affect your ability to get trees pulled in the future.  Quoting Linus:
        to trust things *without* then having to go and check every
        individual change by hand.
 
-(http://lwn.net/Articles/224135/).
+(https://lwn.net/Articles/224135/).
 
 To avoid this kind of situation, ensure that all patches within a given
 branch stick closely to the associated topic; a "driver fixes" branch
index 8395aa2..b32a402 100644 (file)
@@ -16,24 +16,24 @@ distributions runs into internal limits and fails to process the documents
 properly).
 
 Various web sites discuss kernel development at all levels of detail.  Your
-author would like to humbly suggest http://lwn.net/ as a source;
+author would like to humbly suggest https://lwn.net/ as a source;
 information on many specific kernel topics can be found via the LWN kernel
 index at:
 
-       http://lwn.net/Kernel/Index/
+       https://lwn.net/Kernel/Index/
 
 Beyond that, a valuable resource for kernel developers is:
 
-       http://kernelnewbies.org/
+       https://kernelnewbies.org/
 
-And, of course, one should not forget http://kernel.org/, the definitive
+And, of course, one should not forget https://kernel.org/, the definitive
 location for kernel release information.
 
 There are a number of books on kernel development:
 
        Linux Device Drivers, 3rd Edition (Jonathan Corbet, Alessandro
        Rubini, and Greg Kroah-Hartman).  Online at
-       http://lwn.net/Kernel/LDD3/.
+       https://lwn.net/Kernel/LDD3/.
 
        Linux Kernel Development (Robert Love).
 
@@ -46,9 +46,9 @@ information to be found there.
 
 Documentation for git can be found at:
 
-       http://www.kernel.org/pub/software/scm/git/docs/
+       https://www.kernel.org/pub/software/scm/git/docs/
 
-       http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+       https://www.kernel.org/pub/software/scm/git/docs/user-manual.html
 
 
 Conclusion
index a6b4a3a..a3ecb23 100644 (file)
@@ -541,9 +541,9 @@ References and Sources
    :manpage:`syscall(2)` man-page:
    http://man7.org/linux/man-pages/man2/syscall.2.html#NOTES
  - Collated emails from Linus Torvalds discussing the problems with ``ioctl()``:
-   http://yarchive.net/comp/linux/ioctl.html
+   https://yarchive.net/comp/linux/ioctl.html
  - "How to not invent kernel interfaces", Arnd Bergmann,
-   http://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
+   https://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
  - LWN article from Michael Kerrisk on avoiding new uses of CAP_SYS_ADMIN:
    https://lwn.net/Articles/486306/
  - Recommendation from Andrew Morton that all related information for a new
index fbb9297..2e7017b 100644 (file)
@@ -229,7 +229,7 @@ Although interdiff may save you a step or two you are generally advised to
 do the additional steps since interdiff can get things wrong in some cases.
 
 Another alternative is ``ketchup``, which is a python script for automatic
-downloading and applying of patches (http://www.selenic.com/ketchup/).
+downloading and applying of patches (https://www.selenic.com/ketchup/).
 
 Other nice tools are diffstat, which shows a summary of changes made by a
 patch; lsdiff, which displays a short listing of affected files in a patch
@@ -241,7 +241,7 @@ the patch contains a given regular expression.
 Where can I download the patches?
 =================================
 
-The patches are available at http://kernel.org/
+The patches are available at https://kernel.org/
 Most recent patches are linked from the front page, but they also have
 specific homes.
 
index 91c5ff8..5cfb54c 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.6              gcc --version
+GNU C                  4.8              gcc --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
index 4934e65..7eb6bd7 100644 (file)
@@ -109,9 +109,9 @@ been properly thought through.
 References
 ==========
 
-[1] http://lwn.net/Articles/233481/
+[1] https://lwn.net/Articles/233481/
 
-[2] http://lwn.net/Articles/233482/
+[2] https://lwn.net/Articles/233482/
 
 Credits
 =======
index d903eb9..0bcf6c1 100644 (file)
@@ -328,7 +328,7 @@ NOTES:
       label (see **netlabel-config**\(8) helper script for details).
 
    5) The NetLabel SCTP peer labeling rules apply as discussed in the following
-      set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t.
+      set of posts tagged "netlabel" at: https://www.paul-moore.com/blog/t.
 
    6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)``
       CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)``
index fbfe669..7887048 100644 (file)
@@ -29,7 +29,7 @@ u"""
 
     Used tools:
 
-    * ``dot(1)``: Graphviz (http://www.graphviz.org). If Graphviz is not
+    * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
       available, the DOT language is inserted as literal-block.
 
     * SVG to PDF: To generate PDF, you need at least one of this tools:
@@ -41,7 +41,7 @@ u"""
     * generate PDF from SVG / used by PDF (LaTeX) builder
 
     * generate SVG (html-builder) and PDF (latex-builder) from DOT files.
-      DOT: see http://www.graphviz.org/content/dot-language
+      DOT: see https://www.graphviz.org/content/dot-language
 
     """
 
@@ -182,7 +182,7 @@ def setupTools(app):
         kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
     else:
         kernellog.warn(app, "dot(1) not found, for better output quality install "
-                       "graphviz from http://www.graphviz.org")
+                       "graphviz from https://www.graphviz.org")
     if convert_cmd:
         kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
     else:
index 9803e14..38290b9 100644 (file)
@@ -71,7 +71,7 @@ Solution
 
 gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label:
 
-http://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html
+https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html
 
 Using the 'asm goto', we can create branches that are either taken or not taken
 by default, without the need to check memory. Then, at run-time, we can patch
index e938aa0..810481e 100644 (file)
@@ -4,7 +4,7 @@ MSR Trace Events
 
 The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
 To see the definition of the MSRs on Intel systems please see the SDM
-at http://www.intel.com/sdm (Volume 3)
+at https://www.intel.com/sdm (Volume 3)
 
 Available trace points:
 
index 5116e8c..fed13ea 100644 (file)
@@ -5,7 +5,7 @@ In-kernel memory-mapped I/O tracing
 
 Home page and links to optional user space tools:
 
-       http://nouveau.freedesktop.org/wiki/MmioTrace
+       https://nouveau.freedesktop.org/wiki/MmioTrace
 
 MMIO tracing was originally developed by Intel around 2003 for their Fault
 Injection Test Harness. In Dec 2006 - Jan 2007, using the code from Intel,
index 6aab27a..e9a2e92 100644 (file)
@@ -634,7 +634,7 @@ Definita in ``include/linux/export.h``
 
 Questa è una variante di `EXPORT_SYMBOL()` che permette di specificare uno
 spazio dei nomi. Lo spazio dei nomi è documentato in
-:doc:`../core-api/symbol-namespaces`
+:doc:`../../../core-api/symbol-namespaces`
 
 :c:func:`EXPORT_SYMBOL_NS_GPL()`
 --------------------------------
@@ -643,7 +643,7 @@ Definita in ``include/linux/export.h``
 
 Questa è una variante di `EXPORT_SYMBOL_GPL()` che permette di specificare uno
 spazio dei nomi. Lo spazio dei nomi è documentato in
-:doc:`../core-api/symbol-namespaces`
+:doc:`../../../core-api/symbol-namespaces`
 
 Procedure e convenzioni
 =======================
index 89abf6d..66d3d65 100644 (file)
@@ -3,6 +3,8 @@
 :Original: :doc:`../../../process/email-clients`
 :Translator: Alessia Mantegazza <amantegazza@vaga.pv.it>
 
+.. _it_email_clients:
+
 Informazioni sui programmi di posta elettronica per Linux
 =========================================================
 
index c709285..76ed074 100644 (file)
@@ -3,6 +3,8 @@
 :Original: :doc:`../../../process/management-style`
 :Translator: Alessia Mantegazza <amantegazza@vaga.pv.it>
 
+.. _it_managementstyle:
+
 Il modello di gestione del kernel Linux
 =======================================
 
index e50fe65..34d041d 100644 (file)
@@ -1842,12 +1842,15 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효
  (*) smp_mb__before_atomic();
  (*) smp_mb__after_atomic();
 
-     이것들은 값을 리턴하지 않는 (더하기, 빼기, 증가, 감소와 같은) 어토믹
-     함수들을 위한, 특히 그것들이 레퍼런스 카운팅에 사용될 때를 위한
-     함수들입니다.  이 함수들은 메모리 배리어를 내포하고 있지는 않습니다.
-
-     이것들은 값을 리턴하지 않으며 어토믹한 (set_bit 과 clear_bit 같은) 비트
-     연산에도 사용될 수 있습니다.
+     이것들은 메모리 배리어를 내포하지 않는 어토믹 RMW 함수를 사용하지만 코드에
+     메모리 배리어가 필요한 경우를 위한 것들입니다.  메모리 배리어를 내포하지
+     않는 어토믹 RMW 함수들의 예로는 더하기, 빼기, (실패한) 조건적
+     오퍼레이션들, _relaxed 함수들이 있으며, atomic_read 나 atomic_set 은 이에
+     해당되지 않습니다.  메모리 배리어가 필요해지는 흔한 예로는 어토믹
+     오퍼레이션을 사용해 레퍼런스 카운트를 수정하는 경우를 들 수 있습니다.
+
+     이것들은 또한 (set_bit 과 clear_bit 같은) 메모리 배리어를 내포하지 않는
+     어토믹 RMW bitop 함수들을 위해서도 사용될 수 있습니다.
 
      한 예로, 객체 하나를 무효한 것으로 표시하고 그 객체의 레퍼런스 카운트를
      감소시키는 다음 코드를 보세요:
index f8a2879..822c4d4 100644 (file)
@@ -2,7 +2,7 @@
 
 .. include:: ../disclaimer-zh_CN.rst
 
-:Original: :ref:`Documentation/filesystems/debugfs.txt <debugfs_index>`
+:Original: :doc:`../../../filesystems/debugfs`
 
 =======
 Debugfs
index d32016d..d1b7270 100644 (file)
@@ -6,7 +6,7 @@ Kernel Samepage Merging
 
 KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y,
 added to the Linux kernel in 2.6.32.  See ``mm/ksm.c`` for its implementation,
-and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/
+and https://lwn.net/Articles/306704/ and https://lwn.net/Articles/330589/
 
 The userspace interface of KSM is described in :ref:`Documentation/admin-guide/mm/ksm.rst <admin_guide_ksm>`
 
index b2220d0..b2f5ff1 100644 (file)
@@ -14,13 +14,13 @@ improve compression ratio of executable data.
 The XZ decompressor in Linux is called XZ Embedded. It supports
 the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
 for integrity checking. The home page of XZ Embedded is at
-<http://tukaani.org/xz/embedded.html>, where you can find the
+<https://tukaani.org/xz/embedded.html>, where you can find the
 latest version and also information about using the code outside
 the Linux kernel.
 
 For userspace, XZ Utils provide a zlib-like compression library
 and a gzip-like command line tool. XZ Utils can be downloaded from
-<http://tukaani.org/xz/>.
+<https://tukaani.org/xz/>.
 
 XZ related components in the kernel
 ===================================
@@ -113,7 +113,7 @@ Reporting bugs
 ==============
 
 Before reporting a bug, please check that it's not fixed already
-at upstream. See <http://tukaani.org/xz/embedded.html> to get the
+at upstream. See <https://tukaani.org/xz/embedded.html> to get the
 latest code.
 
 Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
index d3b57ce..573cf64 100644 (file)
@@ -1305,6 +1305,13 @@ S:       Supported
 W:     http://www.aquantia.com
 F:     drivers/net/ethernet/aquantia/atlantic/aq_ptp*
 
+ARASAN NAND CONTROLLER DRIVER
+M:     Naga Sureshkumar Relli <nagasure@xilinx.com>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
+F:     drivers/mtd/nand/raw/arasan-nand-controller.c
+
 ARC FRAMEBUFFER DRIVER
 M:     Jaya Kumar <jayalk@intworks.biz>
 S:     Maintained
@@ -3778,9 +3785,8 @@ F:        Documentation/devicetree/bindings/media/cdns,*.txt
 F:     drivers/media/platform/cadence/cdns-csi2*
 
 CADENCE NAND DRIVER
-M:     Piotr Sroka <piotrs@cadence.com>
 L:     linux-mtd@lists.infradead.org
-S:     Maintained
+S:     Orphan
 F:     Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:     drivers/mtd/nand/raw/cadence-nand-controller.c
 
@@ -4704,6 +4710,12 @@ Q:       http://patchwork.linuxtv.org/project/linux-media/list/
 T:     git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/common/cypress_firmware*
 
+CYPRESS CY8CTMA140 TOUCHSCREEN DRIVER
+M:     Linus Walleij <linus.walleij@linaro.org>
+L:     linux-input@vger.kernel.org
+S:     Maintained
+F:     drivers/input/touchscreen/cy8ctma140.c
+
 CYTTSP TOUCHSCREEN DRIVER
 M:     Ferruh Yigit <fery@cypress.com>
 L:     linux-input@vger.kernel.org
@@ -10853,9 +10865,8 @@ F:      Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
 F:     drivers/i2c/busses/i2c-mt7621.c
 
 MEDIATEK NAND CONTROLLER DRIVER
-M:     Xiaolei Li <xiaolei.li@mediatek.com>
 L:     linux-mtd@lists.infradead.org
-S:     Maintained
+S:     Orphan
 F:     Documentation/devicetree/bindings/mtd/mtk-nand.txt
 F:     drivers/mtd/nand/raw/mtk_*
 
@@ -14176,6 +14187,14 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/qcom_iommu.c
 
+QUALCOMM IPCC MAILBOX DRIVER
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L:     linux-arm-msm@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+F:     drivers/mailbox/qcom-ipcc.c
+F:     include/dt-bindings/mailbox/qcom-ipcc.h
+
 QUALCOMM RMNET DRIVER
 M:     Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
 M:     Sean Tranchetti <stranche@codeaurora.org>
@@ -18109,9 +18128,18 @@ F:     drivers/virtio/
 F:     include/linux/vdpa.h
 F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
-F:     mm/balloon_compaction.c
 F:     tools/virtio/
 
+VIRTIO BALLOON
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+M:     David Hildenbrand <david@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/virtio/virtio_balloon.c
+F:     include/uapi/linux/virtio_balloon.h
+F:     include/linux/balloon_compaction.h
+F:     mm/balloon_compaction.c
+
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
 L:     virtualization@lists.linux-foundation.org
@@ -18177,6 +18205,13 @@ S:     Maintained
 F:     drivers/iommu/virtio-iommu.c
 F:     include/uapi/linux/virtio_iommu.h
 
+VIRTIO MEM DRIVER
+M:     David Hildenbrand <david@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/virtio/virtio_mem.c
+F:     include/uapi/linux/virtio_mem.h
+
 VIRTUAL BOX GUEST DEVICE DRIVER
 M:     Hans de Goede <hdegoede@redhat.com>
 M:     Arnd Bergmann <arnd@arndb.de>
index f5c42a8..53520f8 100644 (file)
@@ -430,8 +430,13 @@ register_cpus(void)
 arch_initcall(register_cpus);
 
 #ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_reboot_handler(int unused)
+{
+       machine_halt();
+}
+
 static const struct sysrq_key_op srm_sysrq_reboot_op = {
-       .handler        = machine_halt,
+       .handler        = sysrq_reboot_handler,
        .help_msg       = "reboot(b)",
        .action_msg     = "Resetting",
        .enable_mask    = SYSRQ_ENABLE_BOOT,
index 2674de6..c9bf2df 100644 (file)
@@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
 
 config CRYPTO_SHA1_ARM_CE
        tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        select CRYPTO_SHA1_ARM
        select CRYPTO_HASH
        help
@@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
 
 config CRYPTO_SHA2_ARM_CE
        tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        select CRYPTO_SHA256_ARM
        select CRYPTO_HASH
        help
@@ -96,7 +96,7 @@ config CRYPTO_AES_ARM_BS
 
 config CRYPTO_AES_ARM_CE
        tristate "Accelerated AES using ARMv8 Crypto Extensions"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        select CRYPTO_SKCIPHER
        select CRYPTO_LIB_AES
        select CRYPTO_SIMD
@@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
 
 config CRYPTO_GHASH_ARM_CE
        tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        select CRYPTO_HASH
        select CRYPTO_CRYPTD
        select CRYPTO_GF128MUL
@@ -118,13 +118,13 @@ config CRYPTO_GHASH_ARM_CE
 
 config CRYPTO_CRCT10DIF_ARM_CE
        tristate "CRCT10DIF digest algorithm using PMULL instructions"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        depends on CRC_T10DIF
        select CRYPTO_HASH
 
 config CRYPTO_CRC32_ARM_CE
        tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
-       depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+       depends on KERNEL_MODE_NEON
        depends on CRC32
        select CRYPTO_HASH
 
index 7f9d384..8a46ed3 100644 (file)
@@ -1299,6 +1299,14 @@ config COMPAT_VDSO
          You must have a 32-bit build of glibc 2.22 or later for programs
          to seamlessly take advantage of this.
 
+config THUMB2_COMPAT_VDSO
+       bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+       depends on COMPAT_VDSO
+       default y
+       help
+         Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+         otherwise with '-marm'.
+
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
        depends on SYSCTL
@@ -1740,8 +1748,9 @@ config ARM64_DEBUG_PRIORITY_MASKING
 endif
 
 config RELOCATABLE
-       bool
+       bool "Build a relocatable kernel image" if EXPERT
        select ARCH_HAS_RELR
+       default y
        help
          This builds the kernel as a Position Independent Executable (PIE),
          which retains all relocation metadata required to relocate the
index b263e23..a45366c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/efi.h>
 #include <linux/memblock.h>
 #include <linux/psci.h>
+#include <linux/stddef.h>
 
 #include <asm/cputype.h>
 #include <asm/io.h>
  * is therefore used to delimit the MADT GICC structure minimum length
  * appropriately.
  */
-#define ACPI_MADT_GICC_MIN_LENGTH   ACPI_OFFSET(  \
+#define ACPI_MADT_GICC_MIN_LENGTH   offsetof(  \
        struct acpi_madt_generic_interrupt, efficiency_class)
 
 #define BAD_MADT_GICC_ENTRY(entry, end)                                        \
        (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
        (unsigned long)(entry) + (entry)->header.length > (end))
 
-#define ACPI_MADT_GICC_SPE  (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
+#define ACPI_MADT_GICC_SPE  (offsetof(struct acpi_madt_generic_interrupt, \
        spe_interrupt) + sizeof(u16))
 
 /* Basic configuration for ACPI */
index 7d9cc5e..fb4c275 100644 (file)
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 #define __smp_store_release(p, v)                                      \
 do {                                                                   \
        typeof(p) __p = (p);                                            \
-       union { typeof(*p) __val; char __c[1]; } __u =                  \
-               { .__val = (__force typeof(*p)) (v) };                  \
+       union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =  \
+               { .__val = (__force __unqual_scalar_typeof(*p)) (v) };  \
        compiletime_assert_atomic_type(*p);                             \
        kasan_check_write(__p, sizeof(*p));                             \
        switch (sizeof(*p)) {                                           \
@@ -110,7 +110,7 @@ do {                                                                        \
 
 #define __smp_load_acquire(p)                                          \
 ({                                                                     \
-       union { typeof(*p) __val; char __c[1]; } __u;                   \
+       union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;   \
        typeof(p) __p = (p);                                            \
        compiletime_assert_atomic_type(*p);                             \
        kasan_check_read(__p, sizeof(*p));                              \
@@ -136,33 +136,33 @@ do {                                                                      \
                        : "Q" (*__p) : "memory");                       \
                break;                                                  \
        }                                                               \
-       __u.__val;                                                      \
+       (typeof(*p))__u.__val;                                          \
 })
 
 #define smp_cond_load_relaxed(ptr, cond_expr)                          \
 ({                                                                     \
        typeof(ptr) __PTR = (ptr);                                      \
-       typeof(*ptr) VAL;                                               \
+       __unqual_scalar_typeof(*ptr) VAL;                               \
        for (;;) {                                                      \
                VAL = READ_ONCE(*__PTR);                                \
                if (cond_expr)                                          \
                        break;                                          \
                __cmpwait_relaxed(__PTR, VAL);                          \
        }                                                               \
-       VAL;                                                            \
+       (typeof(*ptr))VAL;                                              \
 })
 
 #define smp_cond_load_acquire(ptr, cond_expr)                          \
 ({                                                                     \
        typeof(ptr) __PTR = (ptr);                                      \
-       typeof(*ptr) VAL;                                               \
+       __unqual_scalar_typeof(*ptr) VAL;                               \
        for (;;) {                                                      \
                VAL = smp_load_acquire(__PTR);                          \
                if (cond_expr)                                          \
                        break;                                          \
                __cmpwait_relaxed(__PTR, VAL);                          \
        }                                                               \
-       VAL;                                                            \
+       (typeof(*ptr))VAL;                                              \
 })
 
 #include <asm-generic/barrier.h>
index 15e80c8..5df4936 100644 (file)
@@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu)
        return 0;
 }
 
-static int debug_monitors_init(void)
+static int __init debug_monitors_init(void)
 {
        return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
                                 "arm64/debug_monitors:starting",
index 8618faa..86a5cf9 100644 (file)
@@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
 
        if (addr == FTRACE_ADDR)
                return &plt[FTRACE_PLT_IDX];
-       if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS))
+       if (addr == FTRACE_REGS_ADDR &&
+           IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
                return &plt[FTRACE_REGS_PLT_IDX];
 #endif
        return NULL;
index 3fd2c11..93b3844 100644 (file)
@@ -319,6 +319,10 @@ void __init setup_arch(char **cmdline_p)
 
        xen_early_init();
        efi_init();
+
+       if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+            pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+
        arm64_memblock_init();
 
        paging_init();
index 3964738..7ea1e82 100644 (file)
@@ -105,6 +105,14 @@ VDSO_CFLAGS += -D__uint128_t='void*'
 VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
 VDSO_CFLAGS += -Wno-int-to-pointer-cast
 
+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
 
index 60eccae..78b87a6 100644 (file)
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
        return tmp + (tmp >> 64);
 }
 
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
 {
        unsigned int offset, shift, sum;
        const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
         * odd/even alignment, and means we can ignore it until the very end.
         */
        shift = offset * 8;
-       data = READ_ONCE_NOCHECK(*ptr++);
+       data = *ptr++;
 #ifdef __LITTLE_ENDIAN
        data = (data >> shift) << shift;
 #else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
        while (unlikely(len > 64)) {
                __uint128_t tmp1, tmp2, tmp3, tmp4;
 
-               tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
-               tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
-               tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
-               tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+               tmp1 = *(__uint128_t *)ptr;
+               tmp2 = *(__uint128_t *)(ptr + 2);
+               tmp3 = *(__uint128_t *)(ptr + 4);
+               tmp4 = *(__uint128_t *)(ptr + 6);
 
                len -= 64;
                ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
                __uint128_t tmp;
 
                sum64 = accumulate(sum64, data);
-               tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+               tmp = *(__uint128_t *)ptr;
 
                len -= 16;
                ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
        }
        if (len > 0) {
                sum64 = accumulate(sum64, data);
-               data = READ_ONCE_NOCHECK(*ptr);
+               data = *ptr;
                len -= 8;
        }
        /*
index 62b0eb6..84eab0f 100644 (file)
@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void)
 
        /* Keep a virtual mapping to IO/config space active */
        iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
-       if (iospace == 0)
+       if (iospace == 0) {
+               pci_free_host_bridge(bridge);
                return -ENODEV;
+       }
        pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
                (u32) iospace);
 
index 27fa946..2b746f5 100644 (file)
@@ -48,7 +48,6 @@ CONFIG_MTD_CFI_STAA=y
 CONFIG_MTD_ROM=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
index a24cfe4..dcfb693 100644 (file)
@@ -42,7 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
        __put_user_asm(__pu_err, __pu_val, ptr, l);     \
        break;                                          \
     case 8:                                            \
-       memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
+       memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \
        break;                                          \
     default:                                           \
        __pu_err = __put_user_bad();                    \
@@ -60,7 +60,7 @@ extern int __put_user_bad(void);
  * aliasing issues.
  */
 
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __user *)(x))
 
 #define __put_user_asm(err,x,ptr,bwl)                          \
        __asm__ ("move" #bwl " %0,%1"                           \
@@ -85,7 +85,7 @@ extern int __put_user_bad(void);
            u64 l;                                              \
            __typeof__(*(ptr)) t;                               \
        } __gu_val;                                             \
-       memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l));           \
+       memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \
        (x) = __gu_val.t;                                       \
        break;                                                  \
     }                                                          \
index 45a0556..1136257 100644 (file)
@@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #if defined(CONFIG_PPC_8xx)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 {
-       pmd_t *pmd = pmd_ptr(mm, addr);
+       pmd_t *pmd = pmd_off(mm, addr);
        pte_basic_t val;
        pte_basic_t *entry = &ptep->pte;
        int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
index c733007..128192e 100644 (file)
@@ -12,64 +12,70 @@ config 32BIT
 
 config RISCV
        def_bool y
-       select OF
-       select OF_EARLY_FLATTREE
-       select OF_IRQ
+       select ARCH_CLOCKSOURCE_INIT
        select ARCH_HAS_BINFMT_FLAT
+       select ARCH_HAS_DEBUG_VIRTUAL if MMU
        select ARCH_HAS_DEBUG_WX
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select ARCH_HAS_GIGANTIC_PAGE
+       select ARCH_HAS_MMIOWB
+       select ARCH_HAS_PTE_SPECIAL
+       select ARCH_HAS_SET_DIRECT_MAP
+       select ARCH_HAS_SET_MEMORY
+       select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select CLONE_BACKWARDS
        select COMMON_CLK
+       select EDAC_SUPPORT
+       select GENERIC_ARCH_TOPOLOGY if SMP
+       select GENERIC_ATOMIC64 if !64BIT
        select GENERIC_CLOCKEVENTS
+       select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+       select GENERIC_IOREMAP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_SHOW
        select GENERIC_PCI_IOMAP
+       select GENERIC_PTDUMP if MMU
        select GENERIC_SCHED_CLOCK
+       select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER if MMU
        select GENERIC_STRNLEN_USER if MMU
-       select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_ATOMIC64 if !64BIT
-       select GENERIC_IOREMAP
-       select GENERIC_PTDUMP if MMU
+       select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+       select HANDLE_DOMAIN_IRQ
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_KASAN if MMU && 64BIT
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KGDB_QXFER_PKT
+       select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_TRACEHOOK
        select HAVE_ASM_MODVERSIONS
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DMA_CONTIGUOUS if MMU
+       select HAVE_EBPF_JIT if MMU
        select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GENERIC_VDSO if MMU && 64BIT
+       select HAVE_PCI
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_DOMAIN
-       select SPARSE_IRQ
-       select SYSCTL_EXCEPTION_TRACE
-       select HAVE_ARCH_TRACEHOOK
-       select HAVE_PCI
        select MODULES_USE_ELF_RELA if MODULES
        select MODULE_SECTIONS if MODULES
-       select THREAD_INFO_IN_TASK
+       select OF
+       select OF_EARLY_FLATTREE
+       select OF_IRQ
        select PCI_DOMAINS_GENERIC if PCI
        select PCI_MSI if PCI
+       select RISCV_INTC
        select RISCV_TIMER
-       select GENERIC_IRQ_MULTI_HANDLER
-       select GENERIC_ARCH_TOPOLOGY if SMP
-       select ARCH_HAS_PTE_SPECIAL
-       select ARCH_HAS_MMIOWB
-       select ARCH_HAS_DEBUG_VIRTUAL if MMU
-       select HAVE_EBPF_JIT if MMU
-       select EDAC_SUPPORT
-       select ARCH_HAS_GIGANTIC_PAGE
-       select ARCH_HAS_SET_DIRECT_MAP
-       select ARCH_HAS_SET_MEMORY
-       select ARCH_HAS_STRICT_KERNEL_RWX if MMU
-       select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select SPARSEMEM_STATIC if 32BIT
-       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
-       select HAVE_ARCH_MMAP_RND_BITS if MMU
-       select ARCH_HAS_GCOV_PROFILE_ALL
-       select HAVE_COPY_THREAD_TLS
-       select HAVE_ARCH_KASAN if MMU && 64BIT
-       select HAVE_ARCH_KGDB
-       select HAVE_ARCH_KGDB_QXFER_PKT
+       select SPARSE_IRQ
+       select SYSCTL_EXCEPTION_TRACE
+       select THREAD_INFO_IN_TASK
 
 config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
@@ -196,11 +202,11 @@ config ARCH_RV64I
        bool "RV64I"
        select 64BIT
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
-       select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE if MMU
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACER
        select SWIOTLB if MMU
 
 endchoice
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644 (file)
index 0000000..4821855
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
index 6e1b0e0..9807ad1 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/linkage.h>
 
-#define NR_IRQS         0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
 #include <asm-generic/irq.h>
 
 #endif /* _ASM_RISCV_IRQ_H */
index 3ddb798..bdddcd5 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/const.h>
 
+#include <vdso/processor.h>
+
 #include <asm/ptrace.h>
 
 /*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)
 extern unsigned long get_wchan(struct task_struct *p);
 
 
-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
-       int dummy;
-       /* In lieu of a halt instruction, induce a long-latency stall. */
-       __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
-       barrier();
-}
-
 static inline void wait_for_interrupt(void)
 {
        __asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
 
 struct device_node;
 int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
 
 extern void riscv_fill_hwcap(void);
 
index f4c7cfd..40bb1c1 100644 (file)
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
 /* SMP initialization hook for setup_arch */
 void __init setup_smp(void);
 
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
 /* Hook for the generic smp_call_function_many() routine. */
 void arch_send_call_function_ipi_mask(struct cpumask *mask);
 
index 7a7fce6..8454f74 100644 (file)
 
 #include <linux/types.h>
 
+#ifndef GENERIC_TIME_VSYSCALL
 struct vdso_data {
 };
+#endif
 
 /*
  * The VDSO symbols are mapped into Linux so we can just use regular symbol
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644 (file)
index 0000000..df6ea65
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES   \
+       VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644 (file)
index 0000000..c8e8186
--- /dev/null
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES  1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+                         struct timezone *_tz)
+{
+       register struct __kernel_old_timeval *tv asm("a0") = _tv;
+       register struct timezone *tz asm("a1") = _tz;
+       register long ret asm("a0");
+       register long nr asm("a7") = __NR_gettimeofday;
+
+       asm volatile ("ecall\n"
+                     : "=r" (ret)
+                     : "r"(tv), "r"(tz), "r"(nr)
+                     : "memory");
+
+       return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       register clockid_t clkid asm("a0") = _clkid;
+       register struct __kernel_timespec *ts asm("a1") = _ts;
+       register long ret asm("a0");
+       register long nr asm("a7") = __NR_clock_gettime;
+
+       asm volatile ("ecall\n"
+                     : "=r" (ret)
+                     : "r"(clkid), "r"(ts), "r"(nr)
+                     : "memory");
+
+       return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       register clockid_t clkid asm("a0") = _clkid;
+       register struct __kernel_timespec *ts asm("a1") = _ts;
+       register long ret asm("a0");
+       register long nr asm("a7") = __NR_clock_getres;
+
+       asm volatile ("ecall\n"
+                     : "=r" (ret)
+                     : "r"(clkid), "r"(ts), "r"(nr)
+                     : "memory");
+
+       return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+       /*
+        * The purpose of csr_read(CSR_TIME) is to trap the system into
+        * M-mode to obtain the value of CSR_TIME. Hence, unlike other
+        * architecture, no fence instructions surround the csr_read()
+        */
+       return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+       return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644 (file)
index 0000000..82a5693
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+       int dummy;
+       /* In lieu of a halt instruction, induce a long-latency stall. */
+       __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+       barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644 (file)
index 0000000..82fd5d8
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+       return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
index 40a3c44..6d59e69 100644 (file)
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
        return hart;
 }
 
+/*
+ * Find hart ID of the CPU DT node under which given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the cpuid from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+       for (; node; node = node->parent) {
+               if (of_device_is_compatible(node, "riscv"))
+                       return riscv_of_processor_hartid(node);
+       }
+
+       return -1;
+}
+
 #ifdef CONFIG_PROC_FS
 
 static void print_isa(struct seq_file *f, const char *isa)
index 56d071b..cae7e6d 100644 (file)
@@ -106,7 +106,9 @@ _save_context:
 
        /* Handle interrupts */
        move a0, sp /* pt_regs */
-       tail do_IRQ
+       la a1, handle_arch_irq
+       REG_L a1, (a1)
+       jr a1
 1:
        /*
         * Exceptions run with interrupts enabled or disabled depending on the
index 345c4f2..7207fa0 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
-#include <linux/irqdomain.h>
 #include <linux/seq_file.h>
 #include <asm/smp.h>
 
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-
-       irq_enter();
-       switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-       case RV_IRQ_TIMER:
-               riscv_timer_interrupt();
-               break;
-#ifdef CONFIG_SMP
-       case RV_IRQ_SOFT:
-               /*
-                * We only use software interrupts to pass IPIs, so if a non-SMP
-                * system gets one, then we don't know what to do.
-                */
-               riscv_software_interrupt();
-               break;
-#endif
-       case RV_IRQ_EXT:
-               handle_arch_irq(regs);
-               break;
-       default:
-               pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
-               BUG();
-       }
-       irq_exit();
-
-       set_irq_regs(old_regs);
-}
-
 void __init init_IRQ(void)
 {
        irqchip_init();
+       if (!handle_arch_irq)
+               panic("No interrupt controller found.");
 }
index 5805791..d4a64df 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/patch.h>
 
 struct patch_insn {
        void *addr;
index a65a8fa..b1d4f45 100644 (file)
@@ -123,11 +123,14 @@ static inline void clear_ipi(void)
                clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
 }
 
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
        unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
+       irq_enter();
+
        clear_ipi();
 
        while (true) {
@@ -138,7 +141,7 @@ void riscv_software_interrupt(void)
 
                ops = xchg(pending_ipis, 0);
                if (ops == 0)
-                       return;
+                       goto done;
 
                if (ops & (1 << IPI_RESCHEDULE)) {
                        stats[IPI_RESCHEDULE]++;
@@ -160,6 +163,10 @@ void riscv_software_interrupt(void)
                /* Order data access and bit testing. */
                mb();
        }
+
+done:
+       irq_exit();
+       set_irq_regs(old_regs);
 }
 
 static const char * const ipi_names[] = {
index 6a53c02..4d3a104 100644 (file)
@@ -26,3 +26,12 @@ void __init time_init(void)
        lpj_fine = riscv_timebase / HZ;
        timer_probe();
 }
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+       cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+       cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
index 5080fdf..ecec177 100644 (file)
@@ -183,6 +183,4 @@ void trap_init(void)
        csr_write(CSR_SCRATCH, 0);
        /* Set the exception vector address */
        csr_write(CSR_TVEC, &handle_exception);
-       /* Enable interrupts */
-       csr_write(CSR_IE, IE_SIE);
 }
index e827fae..6782042 100644 (file)
 #include <linux/slab.h>
 #include <linux/binfmts.h>
 #include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
 #include <asm/vdso.h>
+#endif
 
 extern char vdso_start[], vdso_end[];
 
@@ -26,7 +30,7 @@ static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
 
 static int __init vdso_init(void)
 {
@@ -75,13 +79,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
         */
        mm->context.vdso = (void *)vdso_base;
 
-       ret = install_special_mapping(mm, vdso_base, vdso_len,
+       ret =
+          install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_pagelist);
 
-       if (unlikely(ret))
+       if (unlikely(ret)) {
                mm->context.vdso = NULL;
+               goto end;
+       }
 
+       vdso_base += (vdso_pages << PAGE_SHIFT);
+       ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+       if (unlikely(ret))
+               mm->context.vdso = NULL;
 end:
        mmap_write_unlock(mm);
        return ret;
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 {
        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
                return "[vdso]";
+       if (vma->vm_mm && (vma->vm_start ==
+                          (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+               return "[vdso_data]";
        return NULL;
 }
index 4c8b2a4..38ba55b 100644 (file)
@@ -1,12 +1,14 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # Copied from arch/tile/kernel/vdso/Makefile
 
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
 # Symbols present in the vdso
 vdso-syms  = rt_sigreturn
 ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
 endif
 vdso-syms += getcpu
 vdso-syms += flush_icache
@@ -14,6 +16,10 @@ vdso-syms += flush_icache
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
+ifneq ($(c-gettimeofday-y),)
+  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
 # Build rules
 targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644 (file)
index 91378a5..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-       .text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
-       .cfi_startproc
-       /* For now, just do the syscall. */
-       li a7, __NR_clock_getres
-       ecall
-       ret
-       .cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644 (file)
index 5371fd9..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-       .text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
-       .cfi_startproc
-       /* For now, just do the syscall. */
-       li a7, __NR_clock_gettime
-       ecall
-       ret
-       .cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644 (file)
index e6fb8af..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-       .text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
-       .cfi_startproc
-       /* For now, just do the syscall. */
-       li a7, __NR_gettimeofday
-       ecall
-       ret
-       .cfi_endproc
-ENDPROC(__vdso_gettimeofday)
index f66a091..e6f558b 100644 (file)
@@ -2,11 +2,13 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
  */
+#include <asm/page.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
+       PROVIDE(_vdso_data = . + PAGE_SIZE);
        . = SIZEOF_HEADERS;
 
        .hash           : { *(.hash) }                  :text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644 (file)
index 0000000..d264943
--- /dev/null
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+       return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+       return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+       return __cvdso_clock_getres(clock_id, res);
+}
index 9996f49..f4adb36 100644 (file)
@@ -480,17 +480,6 @@ static void __init setup_vm_final(void)
        csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();
 }
-
-void free_initmem(void)
-{
-       unsigned long init_begin = (unsigned long)__init_begin;
-       unsigned long init_end = (unsigned long)__init_end;
-
-       /* Make the region as non-execuatble. */
-       set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
-       free_initmem_default(POISON_FREE_INITMEM);
-}
-
 #else
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
index d015908..8fff93a 100644 (file)
@@ -129,7 +129,7 @@ struct vector_private {
        struct vector_estats estats;
        struct sock_fprog *bpf;
 
-       char user[0];
+       char user[];
 };
 
 extern int build_transport_data(struct vector_private *vp);
index aa28e9e..c4a0f26 100644 (file)
@@ -29,6 +29,7 @@
 #include <netdb.h>
 #include <stdlib.h>
 #include <os.h>
+#include <limits.h>
 #include <um_malloc.h>
 #include "vector_user.h"
 
@@ -42,6 +43,9 @@
 #define TRANS_RAW "raw"
 #define TRANS_RAW_LEN strlen(TRANS_RAW)
 
+#define TRANS_FD "fd"
+#define TRANS_FD_LEN strlen(TRANS_FD)
+
 #define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
 #define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
 #define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
@@ -347,6 +351,59 @@ unix_cleanup:
        return NULL;
 }
 
+static int strtofd(const char *nptr)
+{
+       long fd;
+       char *endptr;
+
+       if (nptr == NULL)
+               return -1;
+
+       errno = 0;
+       fd = strtol(nptr, &endptr, 10);
+       if (nptr == endptr ||
+               errno != 0 ||
+               *endptr != '\0' ||
+               fd < 0 ||
+               fd > INT_MAX) {
+               return -1;
+       }
+       return fd;
+}
+
+static struct vector_fds *user_init_fd_fds(struct arglist *ifspec)
+{
+       int fd = -1;
+       char *fdarg = NULL;
+       struct vector_fds *result = NULL;
+
+       fdarg = uml_vector_fetch_arg(ifspec, "fd");
+       fd = strtofd(fdarg);
+       if (fd == -1) {
+               printk(UM_KERN_ERR "fd open: bad or missing fd argument");
+               goto fd_cleanup;
+       }
+
+       result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+       if (result == NULL) {
+               printk(UM_KERN_ERR "fd open: allocation failed");
+               goto fd_cleanup;
+       }
+
+       result->rx_fd = fd;
+       result->tx_fd = fd;
+       result->remote_addr_size = 0;
+       result->remote_addr = NULL;
+       return result;
+
+fd_cleanup:
+       if (fd >= 0)
+               os_close_file(fd);
+       if (result != NULL)
+               kfree(result);
+       return NULL;
+}
+
 static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
 {
        int rxfd = -1, txfd = -1;
@@ -578,6 +635,8 @@ struct vector_fds *uml_vector_user_open(
                return user_init_socket_fds(parsed, ID_L2TPV3);
        if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
                return user_init_unix_fds(parsed, ID_BESS);
+       if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
+               return user_init_fd_fds(parsed);
        return NULL;
 }
 
index 6c71b60..6f147cd 100644 (file)
@@ -78,7 +78,7 @@ struct vhost_user_config {
        u32 offset;
        u32 size;
        u32 flags;
-       u8 payload[0]; /* Variable length */
+       u8 payload[]; /* Variable length */
 } __packed;
 
 struct vhost_user_vring_state {
index be54d36..351aee5 100644 (file)
@@ -74,7 +74,7 @@ struct virtio_uml_vq_info {
 
 extern unsigned long long physmem_size, highmem;
 
-#define vu_err(vu_dev, ...)    dev_err(&(vu_dev)->pdev->dev, __VA_ARGS__)
+#define vu_err(vu_dev, ...)    dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
 
 /* Vhost-user protocol */
 
index 26ecbd6..e4421db 100644 (file)
@@ -6,6 +6,7 @@
 #include <stdio.h>
 #include <unistd.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <signal.h>
@@ -289,7 +290,7 @@ int os_write_file(int fd, const void *buf, int len)
 
 int os_sync_file(int fd)
 {
-       int n = fsync(fd);
+       int n = fdatasync(fd);
 
        if (n < 0)
                return -errno;
index ef76a04..d7acae4 100644 (file)
@@ -32,13 +32,6 @@ extern pmd_t initial_pg_pmd[];
 void paging_init(void);
 void sync_initial_page_table(void);
 
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok( ..)'
- */
-#undef TEST_ACCESS_OK
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
index ab03fed..f8d65c9 100644 (file)
@@ -135,26 +135,30 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
        typeof(ubuf->st_gid) gid = 0;
        SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
-       if (!access_ok(ubuf, sizeof(struct stat64)) ||
-           __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
-           __put_user(stat->ino, &ubuf->__st_ino) ||
-           __put_user(stat->ino, &ubuf->st_ino) ||
-           __put_user(stat->mode, &ubuf->st_mode) ||
-           __put_user(stat->nlink, &ubuf->st_nlink) ||
-           __put_user(uid, &ubuf->st_uid) ||
-           __put_user(gid, &ubuf->st_gid) ||
-           __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
-           __put_user(stat->size, &ubuf->st_size) ||
-           __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
-           __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
-           __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
-           __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
-           __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
-           __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
-           __put_user(stat->blksize, &ubuf->st_blksize) ||
-           __put_user(stat->blocks, &ubuf->st_blocks))
+       if (!user_write_access_begin(ubuf, sizeof(struct stat64)))
                return -EFAULT;
+       unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault);
+       unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault);
+       unsafe_put_user(stat->ino, &ubuf->st_ino, Efault);
+       unsafe_put_user(stat->mode, &ubuf->st_mode, Efault);
+       unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault);
+       unsafe_put_user(uid, &ubuf->st_uid, Efault);
+       unsafe_put_user(gid, &ubuf->st_gid, Efault);
+       unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault);
+       unsafe_put_user(stat->size, &ubuf->st_size, Efault);
+       unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault);
+       unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault);
+       unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault);
+       unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault);
+       unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault);
+       unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault);
+       unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault);
+       unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault);
+       user_access_end();
        return 0;
+Efault:
+       user_write_access_end();
+       return -EFAULT;
 }
 
 COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename,
index 238b78e..af9cdb4 100644 (file)
@@ -1252,7 +1252,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 * only, there can be valuable data in the rest which needs
                 * to be preserved e.g. on migration.
                 */
-               if (__clear_user((void __user *)addr, sizeof(u32)))
+               if (__put_user(0, (u32 __user *)addr))
                        return 1;
                hv_vcpu->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
index d5daf35..091c0a0 100644 (file)
@@ -316,7 +316,6 @@ config CRYPTO_AEGIS128
 config CRYPTO_AEGIS128_SIMD
        bool "Support SIMD acceleration for AEGIS-128"
        depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
-       depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
        default y
 
 config CRYPTO_AEGIS128_AESNI_SSE2
index f2df416..d41eb9e 100644 (file)
@@ -51,6 +51,8 @@ static acpi_adr_space_type acpi_gbl_space_id_list[] = {
        ACPI_ADR_SPACE_IPMI,
        ACPI_ADR_SPACE_GPIO,
        ACPI_ADR_SPACE_GSBUS,
+       ACPI_ADR_SPACE_PLATFORM_COMM,
+       ACPI_ADR_SPACE_PLATFORM_RT,
        ACPI_ADR_SPACE_DATA_TABLE,
        ACPI_ADR_SPACE_FIXED_HARDWARE
 };
index 177ab88..ed9aedf 100644 (file)
@@ -78,7 +78,8 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
        "IPMI",                 /* 0x07 */
        "GeneralPurposeIo",     /* 0x08 */
        "GenericSerialBus",     /* 0x09 */
-       "PCC"                   /* 0x0A */
+       "PCC",                  /* 0x0A */
+       "PlatformRtMechanism"   /* 0x0B */
 };
 
 const char *acpi_ut_get_region_name(u8 space_id)
index b44b12a..94d91c6 100644 (file)
@@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                 * possibly drop references to the power resources in use.
                 */
                state = ACPI_STATE_D3_HOT;
-               /* If _PR3 is not available, use D3hot as the target state. */
+               /* If D3cold is not supported, use D3hot as the target state. */
                if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
                        target_state = state;
        } else if (!device->power.states[state].flags.valid) {
index 47b4969..5be5a97 100644 (file)
@@ -35,6 +35,7 @@ int pxm_to_node(int pxm)
                return NUMA_NO_NODE;
        return pxm_to_node_map[pxm];
 }
+EXPORT_SYMBOL(pxm_to_node);
 
 int node_to_pxm(int node)
 {
index 4da23e7..8777fac 100644 (file)
@@ -918,12 +918,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
 
                if (buffer.length && package
                    && package->type == ACPI_TYPE_PACKAGE
-                   && package->package.count) {
-                       int err = acpi_extract_power_resources(package, 0,
-                                                              &ps->resources);
-                       if (!err)
-                               device->power.flags.power_resources = 1;
-               }
+                   && package->package.count)
+                       acpi_extract_power_resources(package, 0, &ps->resources);
+
                ACPI_FREE(buffer.pointer);
        }
 
@@ -970,14 +967,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
                acpi_bus_init_power_state(device, i);
 
        INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
-       if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
-               device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
 
-       /* Set defaults for D0 and D3hot states (always valid) */
+       /* Set the defaults for D0 and D3hot (always supported). */
        device->power.states[ACPI_STATE_D0].flags.valid = 1;
        device->power.states[ACPI_STATE_D0].power = 100;
        device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
 
+       /*
+        * Use power resources only if the D0 list of them is populated, because
+        * some platforms may provide _PR3 only to indicate D3cold support and
+        * in those cases the power resources list returned by it may be bogus.
+        */
+       if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
+               device->power.flags.power_resources = 1;
+               /*
+                * D3cold is supported if the D3hot list of power resources is
+                * not empty.
+                */
+               if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
+                       device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+       }
+
        if (acpi_bus_init_power(device))
                device->flags.power_manageable = 0;
 }
index a0a7ae7..d82b3b7 100644 (file)
@@ -3535,7 +3535,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
 }
 
 static int cdrom_sysctl_info(struct ctl_table *ctl, int write,
-                           void __user *buffer, size_t *lenp, loff_t *ppos)
+                           void *buffer, size_t *lenp, loff_t *ppos)
 {
        int pos;
        char *info = cdrom_sysctl_settings.info;
index a7cf6aa..2a41b21 100644 (file)
@@ -2087,7 +2087,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
  * Return entropy available scaled to integral bits
  */
 static int proc_do_entropy(struct ctl_table *table, int write,
-                          void __user *buffer, size_t *lenp, loff_t *ppos)
+                          void *buffer, size_t *lenp, loff_t *ppos)
 {
        struct ctl_table fake_table;
        int entropy_count;
index c4f15c4..9de1dab 100644 (file)
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/sched_clock.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
 #include <asm/smp.h>
 #include <asm/sbi.h>
 
@@ -39,6 +42,7 @@ static int riscv_clock_next_event(unsigned long delta,
        return 0;
 }
 
+static unsigned int riscv_clock_event_irq;
 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
        .name                   = "riscv_timer_clockevent",
        .features               = CLOCK_EVT_FEAT_ONESHOT,
@@ -74,30 +78,36 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
        struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
 
        ce->cpumask = cpumask_of(cpu);
+       ce->irq = riscv_clock_event_irq;
        clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
 
-       csr_set(CSR_IE, IE_TIE);
+       enable_percpu_irq(riscv_clock_event_irq,
+                         irq_get_trigger_type(riscv_clock_event_irq));
        return 0;
 }
 
 static int riscv_timer_dying_cpu(unsigned int cpu)
 {
-       csr_clear(CSR_IE, IE_TIE);
+       disable_percpu_irq(riscv_clock_event_irq);
        return 0;
 }
 
 /* called directly from the low-level interrupt handler */
-void riscv_timer_interrupt(void)
+static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
 
        csr_clear(CSR_IE, IE_TIE);
        evdev->event_handler(evdev);
+
+       return IRQ_HANDLED;
 }
 
 static int __init riscv_timer_init_dt(struct device_node *n)
 {
        int cpuid, hartid, error;
+       struct device_node *child;
+       struct irq_domain *domain;
 
        hartid = riscv_of_processor_hartid(n);
        if (hartid < 0) {
@@ -115,6 +125,25 @@ static int __init riscv_timer_init_dt(struct device_node *n)
        if (cpuid != smp_processor_id())
                return 0;
 
+       domain = NULL;
+       child = of_get_compatible_child(n, "riscv,cpu-intc");
+       if (!child) {
+               pr_err("Failed to find INTC node [%pOF]\n", n);
+               return -ENODEV;
+       }
+       domain = irq_find_host(child);
+       of_node_put(child);
+       if (!domain) {
+               pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
+               return -ENODEV;
+       }
+
+       riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
+       if (!riscv_clock_event_irq) {
+               pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
+               return -ENODEV;
+       }
+
        pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
               __func__, cpuid, hartid);
        error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
@@ -126,6 +155,14 @@ static int __init riscv_timer_init_dt(struct device_node *n)
 
        sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
 
+       error = request_percpu_irq(riscv_clock_event_irq,
+                                   riscv_timer_interrupt,
+                                   "riscv-timer", &riscv_clock_event);
+       if (error) {
+               pr_err("registering percpu irq failed [%d]\n", error);
+               return error;
+       }
+
        error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
                         "clockevents/riscv/timer:starting",
                         riscv_timer_starting_cpu, riscv_timer_dying_cpu);
index 289e8ce..429e5a3 100644 (file)
@@ -126,12 +126,12 @@ static void boost_set_msr_each(void *p_en)
        boost_set_msr(enable);
 }
 
-static int set_boost(int val)
+static int set_boost(struct cpufreq_policy *policy, int val)
 {
-       get_online_cpus();
-       on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
-       put_online_cpus();
-       pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+       on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+                        (void *)(long)val, 1);
+       pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+                cpumask_pr_args(policy->cpus), val ? "en" : "dis");
 
        return 0;
 }
@@ -162,7 +162,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
        if (ret || val > 1)
                return -EINVAL;
 
-       set_boost(val);
+       get_online_cpus();
+       set_boost(policy, val);
+       put_online_cpus();
 
        return count;
 }
index bda0b24..257d726 100644 (file)
@@ -37,6 +37,7 @@
  * requested etc.
  */
 static struct cppc_cpudata **all_cpu_data;
+static bool boost_supported;
 
 struct cppc_workaround_oem_info {
        char oem_id[ACPI_OEM_ID_SIZE + 1];
@@ -310,7 +311,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
         * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
         */
        policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
-       policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+       policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
 
        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -318,7 +319,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
         * nonlinear perf
         */
        policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
-       policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+       policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
 
        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
        policy->shared_type = cpu->shared_type;
@@ -343,6 +344,13 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cpu->cur_policy = policy;
 
+       /*
+        * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
+        * is supported.
+        */
+       if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf)
+               boost_supported = true;
+
        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_cpufreq_perf_to_khz(cpu,
                                        cpu->perf_caps.highest_perf);
@@ -410,6 +418,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
        return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
 }
 
+static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+{
+       struct cppc_cpudata *cpudata;
+       int ret;
+
+       if (!boost_supported) {
+               pr_err("BOOST not supported by CPU or firmware\n");
+               return -EINVAL;
+       }
+
+       cpudata = all_cpu_data[policy->cpu];
+       if (state)
+               policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+                                       cpudata->perf_caps.highest_perf);
+       else
+               policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+                                       cpudata->perf_caps.nominal_perf);
+       policy->cpuinfo.max_freq = policy->max;
+
+       ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
@@ -417,6 +451,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
        .get = cppc_cpufreq_get_rate,
        .init = cppc_cpufreq_cpu_init,
        .stop_cpu = cppc_cpufreq_stop_cpu,
+       .set_boost = cppc_cpufreq_set_boost,
        .name = "cppc_cpufreq",
 };
 
index 26fe8df..79742bb 100644 (file)
@@ -121,6 +121,10 @@ static int resources_available(void)
 
        clk_put(cpu_clk);
 
+       ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+       if (ret)
+               return ret;
+
        name = find_supply_name(cpu_dev);
        /* Platform doesn't require regulator */
        if (!name)
index d03f250..0128de3 100644 (file)
@@ -2532,34 +2532,29 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
 /*********************************************************************
  *               BOOST                                              *
  *********************************************************************/
-static int cpufreq_boost_set_sw(int state)
+static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
 {
-       struct cpufreq_policy *policy;
-
-       for_each_active_policy(policy) {
-               int ret;
+       int ret;
 
-               if (!policy->freq_table)
-                       return -ENXIO;
+       if (!policy->freq_table)
+               return -ENXIO;
 
-               ret = cpufreq_frequency_table_cpuinfo(policy,
-                                                     policy->freq_table);
-               if (ret) {
-                       pr_err("%s: Policy frequency update failed\n",
-                              __func__);
-                       return ret;
-               }
-
-               ret = freq_qos_update_request(policy->max_freq_req, policy->max);
-               if (ret < 0)
-                       return ret;
+       ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+       if (ret) {
+               pr_err("%s: Policy frequency update failed\n", __func__);
+               return ret;
        }
 
+       ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+       if (ret < 0)
+               return ret;
+
        return 0;
 }
 
 int cpufreq_boost_trigger_state(int state)
 {
+       struct cpufreq_policy *policy;
        unsigned long flags;
        int ret = 0;
 
@@ -2570,15 +2565,25 @@ int cpufreq_boost_trigger_state(int state)
        cpufreq_driver->boost_enabled = state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       ret = cpufreq_driver->set_boost(state);
-       if (ret) {
-               write_lock_irqsave(&cpufreq_driver_lock, flags);
-               cpufreq_driver->boost_enabled = !state;
-               write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-               pr_err("%s: Cannot %s BOOST\n",
-                      __func__, state ? "enable" : "disable");
+       get_online_cpus();
+       for_each_active_policy(policy) {
+               ret = cpufreq_driver->set_boost(policy, state);
+               if (ret)
+                       goto err_reset_state;
        }
+       put_online_cpus();
+
+       return 0;
+
+err_reset_state:
+       put_online_cpus();
+
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+       cpufreq_driver->boost_enabled = !state;
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+       pr_err("%s: Cannot %s BOOST\n",
+              __func__, state ? "enable" : "disable");
 
        return ret;
 }
index 2e233ad..3d2f143 100644 (file)
@@ -93,7 +93,8 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
 
 static struct cpufreq_driver tegra186_cpufreq_driver = {
        .name = "tegra186",
-       .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+       .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+                       CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = tegra186_cpufreq_set_target,
        .init = tegra186_cpufreq_init,
index 788c660..cee2a27 100644 (file)
@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
 
 struct nitrox_device *nitrox_get_first_device(void)
 {
-       struct nitrox_device *ndev = NULL;
+       struct nitrox_device *ndev;
 
        mutex_lock(&devlist_lock);
        list_for_each_entry(ndev, &ndevlist, list) {
@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
                        break;
        }
        mutex_unlock(&devlist_lock);
-       if (!ndev)
+       if (&ndev->list == &ndevlist)
                return NULL;
 
        refcount_inc(&ndev->refcnt);
index 32dc00d..9f937bd 100644 (file)
@@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
                tag = (u8 *)rctx->auth_tag;
                for (i = 0; i < dd->authsize; i++) {
                        if (tag[i]) {
-                               dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
                                ret = -EBADMSG;
                        }
                }
index 824ddf2..b5aff20 100644 (file)
@@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev)
        spin_unlock(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
-               for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+               for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
                        crypto_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);
+                       dd->pdata->algs_info[i].registered--;
+               }
 
-       for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+       for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
                aalg = &dd->pdata->aead_algs_info->algs_list[i];
                crypto_unregister_aead(aalg);
+               dd->pdata->aead_algs_info->registered--;
+
        }
 
        crypto_engine_exit(dd->engine);
index cc88b73..94b2dba 100644 (file)
@@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src,
                amt = min(src->length - srco, dst->length - dsto);
                amt = min(len, amt);
 
-               srcb = sg_virt(src) + srco;
-               dstb = sg_virt(dst) + dsto;
+               srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
+               dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
 
                memcpy(dstb, srcb, amt);
 
+               if (!PageSlab(sg_page(dst)))
+                       flush_kernel_dcache_page(sg_page(dst));
+
+               kunmap_atomic(srcb);
+               kunmap_atomic(dstb);
+
                srco += amt;
                dsto += amt;
                len -= amt;
index 063ad5d..82691a0 100644 (file)
@@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx {
 };
 
 struct omap_sham_ctx {
-       struct omap_sham_dev    *dd;
-
        unsigned long           flags;
 
        /* fallback stuff */
@@ -750,8 +748,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
        int offset = rctx->offset;
        int bufcnt = rctx->bufcnt;
 
-       if (!sg || !sg->length || !nbytes)
+       if (!sg || !sg->length || !nbytes) {
+               if (bufcnt) {
+                       bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+                       sg_init_table(rctx->sgl, 1);
+                       sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
+                       rctx->sg = rctx->sgl;
+                       rctx->sg_len = 1;
+               }
+
                return 0;
+       }
 
        new_len = nbytes;
 
@@ -895,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
        if (hash_later < 0)
                hash_later = 0;
 
-       if (hash_later) {
+       if (hash_later && hash_later <= rctx->buflen) {
                scatterwalk_map_and_copy(rctx->buffer,
                                         req->src,
                                         req->nbytes - hash_later,
@@ -925,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
        return 0;
 }
 
+struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+{
+       struct omap_sham_dev *dd;
+
+       if (ctx->dd)
+               return ctx->dd;
+
+       spin_lock_bh(&sham.lock);
+       dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
+       list_move_tail(&dd->list, &sham.dev_list);
+       ctx->dd = dd;
+       spin_unlock_bh(&sham.lock);
+
+       return dd;
+}
+
 static int omap_sham_init(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-       struct omap_sham_dev *dd = NULL, *tmp;
+       struct omap_sham_dev *dd;
        int bs = 0;
 
-       spin_lock_bh(&sham.lock);
-       if (!tctx->dd) {
-               list_for_each_entry(tmp, &sham.dev_list, list) {
-                       dd = tmp;
-                       break;
-               }
-               tctx->dd = dd;
-       } else {
-               dd = tctx->dd;
-       }
-       spin_unlock_bh(&sham.lock);
+       ctx->dd = NULL;
 
-       ctx->dd = dd;
+       dd = omap_sham_find_dev(ctx);
+       if (!dd)
+               return -ENODEV;
 
        ctx->flags = 0;
 
@@ -1215,8 +1230,7 @@ err1:
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-       struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-       struct omap_sham_dev *dd = tctx->dd;
+       struct omap_sham_dev *dd = ctx->dd;
 
        ctx->op = op;
 
@@ -1226,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 static int omap_sham_update(struct ahash_request *req)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-       struct omap_sham_dev *dd = ctx->dd;
+       struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
 
        if (!req->nbytes)
                return 0;
@@ -1319,21 +1333,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
        struct omap_sham_hmac_ctx *bctx = tctx->base;
        int bs = crypto_shash_blocksize(bctx->shash);
        int ds = crypto_shash_digestsize(bctx->shash);
-       struct omap_sham_dev *dd = NULL, *tmp;
        int err, i;
 
-       spin_lock_bh(&sham.lock);
-       if (!tctx->dd) {
-               list_for_each_entry(tmp, &sham.dev_list, list) {
-                       dd = tmp;
-                       break;
-               }
-               tctx->dd = dd;
-       } else {
-               dd = tctx->dd;
-       }
-       spin_unlock_bh(&sham.lock);
-
        err = crypto_shash_setkey(tctx->fallback, key, keylen);
        if (err)
                return err;
@@ -1350,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
 
        memset(bctx->ipad + keylen, 0, bs - keylen);
 
-       if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+       if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
                memcpy(bctx->opad, bctx->ipad, bs);
 
                for (i = 0; i < bs; i++) {
@@ -1571,7 +1572,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_name               = "sha224",
                .cra_driver_name        = "omap-sha224",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
@@ -1592,7 +1594,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_name               = "sha256",
                .cra_driver_name        = "omap-sha256",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
@@ -1614,7 +1617,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_name               = "hmac(sha224)",
                .cra_driver_name        = "omap-hmac-sha224",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
@@ -1637,7 +1641,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_name               = "hmac(sha256)",
                .cra_driver_name        = "omap-hmac-sha256",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
@@ -1662,7 +1667,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_name               = "sha384",
                .cra_driver_name        = "omap-sha384",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
@@ -1683,7 +1689,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_name               = "sha512",
                .cra_driver_name        = "omap-sha512",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
@@ -1705,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_name               = "hmac(sha384)",
                .cra_driver_name        = "omap-hmac-sha384",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
@@ -1728,7 +1736,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_name               = "hmac(sha512)",
                .cra_driver_name        = "omap-hmac-sha512",
                .cra_priority           = 400,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
+               .cra_flags              = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                               CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
@@ -2154,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev)
        }
 
        dd->flags |= dd->pdata->flags;
+       sham.flags |= dd->pdata->flags;
 
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
@@ -2181,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev)
        spin_unlock(&sham.lock);
 
        for (i = 0; i < dd->pdata->algs_info_size; i++) {
+               if (dd->pdata->algs_info[i].registered)
+                       break;
+
                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                        struct ahash_alg *alg;
 
@@ -2232,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev)
        list_del(&dd->list);
        spin_unlock(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
-               for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+               for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
                        crypto_unregister_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
+                       dd->pdata->algs_info[i].registered--;
+               }
        tasklet_kill(&dd->done_task);
        pm_runtime_disable(&pdev->dev);
 
index fd045e6..cb8a6ea 100644 (file)
@@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
-       int i;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
+       struct scatterlist *sg;
 
        src_nents = sg_nents_for_len(req->src, req->cryptlen);
+       if (src_nents < 0) {
+               pr_err("Invalid number of src SG.\n");
+               return src_nents;
+       }
+
        dst_nents = sg_nents(req->dst);
 
        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -402,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                goto free;
        }
 
+       dst_len = min_t(unsigned int, req->cryptlen, dst_len);
        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->cryptlen, dst_len);
 
@@ -442,12 +448,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
        vc_sym_req->iv = iv;
 
        /* Source data */
-       for (i = 0; i < src_nents; i++)
-               sgs[num_out++] = &req->src[i];
+       for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+               sgs[num_out++] = sg;
 
        /* Destination data */
-       for (i = 0; i < dst_nents; i++)
-               sgs[num_out + num_in++] = &req->dst[i];
+       for (sg = req->dst; sg; sg = sg_next(sg))
+               sgs[num_out + num_in++] = sg;
 
        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@@ -577,10 +583,11 @@ static void virtio_crypto_skcipher_finalize_req(
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->cryptlen - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);
-       crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
-                                          req, err);
        kzfree(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);
+
+       crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+                                          req, err);
 }
 
 static struct virtio_crypto_algo virtio_crypto_algs[] = { {
index 7d39b85..3a3a511 100644 (file)
@@ -226,6 +226,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
        case 3:
        case 4:
                color_index = TrueCModeIndex;
+               break;
        default:
                return;
        }
index b1099e1..d877ddc 100644 (file)
@@ -27,6 +27,7 @@
 #include <drm/drm_print.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_sysfs.h>
 
 #include <linux/uaccess.h>
 
@@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector)
        drm_mode_object_register(connector->dev, &connector->base);
 
        connector->registration_state = DRM_CONNECTOR_REGISTERED;
+
+       /* Let userspace know we have a new connector */
+       drm_sysfs_hotplug_event(connector->dev);
+
        goto unlock;
 
 err_debugfs:
index 939f003..f0336c8 100644 (file)
@@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
                return PTR_ERR(connector->kdev);
        }
 
-       /* Let userspace know we have a new connector */
-       drm_sysfs_hotplug_event(dev);
-
        if (connector->ddc)
                return sysfs_create_link(&connector->kdev->kobj,
                                 &connector->ddc->dev.kobj, "ddc");
index 40d42dc..ed9e53c 100644 (file)
@@ -5206,6 +5206,9 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
                       struct intel_crtc_state *crtc_state,
                       unsigned int type)
 {
+       if (encoder->type != INTEL_OUTPUT_DDI)
+               return;
+
        switch (type) {
        case DP_SDP_VSC:
                intel_read_dp_vsc_sdp(encoder, crtc_state,
index f5d59d1..30c229f 100644 (file)
@@ -1921,11 +1921,6 @@ get_engines(struct i915_gem_context *ctx,
        }
 
        user = u64_to_user_ptr(args->value);
-       if (!access_ok(user, size)) {
-               err = -EFAULT;
-               goto err_free;
-       }
-
        if (put_user(0, &user->extensions)) {
                err = -EFAULT;
                goto err_free;
index 3ce1856..db8eb1c 100644 (file)
@@ -1988,6 +1988,38 @@ static const struct dma_fence_work_ops eb_parse_ops = {
        .release = __eb_parse_release,
 };
 
+static inline int
+__parser_mark_active(struct i915_vma *vma,
+                    struct intel_timeline *tl,
+                    struct dma_fence *fence)
+{
+       struct intel_gt_buffer_pool_node *node = vma->private;
+
+       return i915_active_ref(&node->active, tl, fence);
+}
+
+static int
+parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
+{
+       int err;
+
+       mutex_lock(&tl->mutex);
+
+       err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
+       if (err)
+               goto unlock;
+
+       if (pw->trampoline) {
+               err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
+               if (err)
+                       goto unlock;
+       }
+
+unlock:
+       mutex_unlock(&tl->mutex);
+       return err;
+}
+
 static int eb_parse_pipeline(struct i915_execbuffer *eb,
                             struct i915_vma *shadow,
                             struct i915_vma *trampoline)
@@ -2022,20 +2054,25 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
        pw->shadow = shadow;
        pw->trampoline = trampoline;
 
+       /* Mark active refs early for this worker, in case we get interrupted */
+       err = parser_mark_active(pw, eb->context->timeline);
+       if (err)
+               goto err_commit;
+
        err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
        if (err)
-               goto err_trampoline;
+               goto err_commit;
 
        err = dma_resv_reserve_shared(pw->batch->resv, 1);
        if (err)
-               goto err_batch_unlock;
+               goto err_commit_unlock;
 
        /* Wait for all writes (and relocs) into the batch to complete */
        err = i915_sw_fence_await_reservation(&pw->base.chain,
                                              pw->batch->resv, NULL, false,
                                              0, I915_FENCE_GFP);
        if (err < 0)
-               goto err_batch_unlock;
+               goto err_commit_unlock;
 
        /* Keep the batch alive and unwritten as we parse */
        dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
@@ -2050,11 +2087,13 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
        dma_fence_work_commit_imm(&pw->base);
        return 0;
 
-err_batch_unlock:
+err_commit_unlock:
        dma_resv_unlock(pw->batch->resv);
-err_trampoline:
-       if (trampoline)
-               i915_active_release(&trampoline->active);
+err_commit:
+       i915_sw_fence_set_error_once(&pw->base.chain, err);
+       dma_fence_work_commit_imm(&pw->base);
+       return err;
+
 err_shadow:
        i915_active_release(&shadow->active);
 err_batch:
@@ -2100,6 +2139,7 @@ static int eb_parse(struct i915_execbuffer *eb)
                goto err;
        }
        i915_gem_object_set_readonly(shadow->obj);
+       shadow->private = pool;
 
        trampoline = NULL;
        if (CMDPARSER_USES_GGTT(eb->i915)) {
@@ -2113,6 +2153,7 @@ static int eb_parse(struct i915_execbuffer *eb)
                        shadow = trampoline;
                        goto err_shadow;
                }
+               shadow->private = pool;
 
                eb->batch_flags |= I915_DISPATCH_SECURE;
        }
@@ -2129,7 +2170,6 @@ static int eb_parse(struct i915_execbuffer *eb)
        eb->trampoline = trampoline;
        eb->batch_start_offset = 0;
 
-       shadow->private = pool;
        return 0;
 
 err_trampoline:
index 8e45ca3..55b97c3 100644 (file)
@@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
                                unsigned long arg)
 {
        struct drm_i915_getparam32 req32;
-       drm_i915_getparam_t __user *request;
+       struct drm_i915_getparam req;
 
        if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
                return -EFAULT;
 
-       request = compat_alloc_user_space(sizeof(*request));
-       if (!access_ok(request, sizeof(*request)) ||
-           __put_user(req32.param, &request->param) ||
-           __put_user((void __user *)(unsigned long)req32.value,
-                      &request->value))
-               return -EFAULT;
+       req.param = req32.param;
+       req.value = compat_ptr(req32.value);
 
-       return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
-                        (unsigned long)request);
+       return drm_ioctl_kernel(file, i915_getparam_ioctl, &req,
+                               DRM_RENDER_ALLOW);
 }
 
 static drm_ioctl_compat_t *i915_compat_ioctls[] = {
index a3dde77..02559da 100644 (file)
@@ -65,7 +65,7 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
        "Override/Ignore selection of SDVO panel mode in the VBT "
        "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
-i915_param_named_unsafe(reset, int, 0600,
+i915_param_named_unsafe(reset, uint, 0600,
        "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
 
 i915_param_named_unsafe(vbt_firmware, charp, 0400,
index 665bb07..25329b7 100644 (file)
@@ -3896,9 +3896,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
        if (!n_regs)
                return NULL;
 
-       if (!access_ok(regs, n_regs * sizeof(u32) * 2))
-               return ERR_PTR(-EFAULT);
-
        /* No is_valid function means we're not allowing any register to be programmed. */
        GEM_BUG_ON(!is_valid);
        if (!is_valid)
index ef25ce6..e75c528 100644 (file)
@@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
                           query_sz))
                return -EFAULT;
 
-       if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
-                      total_length))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
        topo.eu_offset = slice_length + subslice_length;
        topo.eu_stride = sseu->eu_stride;
 
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
                           &topo, sizeof(topo)))
                return -EFAULT;
 
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
                           &sseu->slice_mask, slice_length))
                return -EFAULT;
 
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                           sizeof(topo) + slice_length),
                           sseu->subslice_mask, subslice_length))
                return -EFAULT;
 
-       if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+       if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
                                           sizeof(topo) +
                                           slice_length + subslice_length),
                           sseu->eu_mask, eu_length))
@@ -131,14 +127,14 @@ query_engine_info(struct drm_i915_private *i915,
                info.engine.engine_instance = engine->uabi_instance;
                info.capabilities = engine->uabi_capabilities;
 
-               if (__copy_to_user(info_ptr, &info, sizeof(info)))
+               if (copy_to_user(info_ptr, &info, sizeof(info)))
                        return -EFAULT;
 
                query.num_engines++;
                info_ptr++;
        }
 
-       if (__copy_to_user(query_ptr, &query, sizeof(query)))
+       if (copy_to_user(query_ptr, &query, sizeof(query)))
                return -EFAULT;
 
        return len;
@@ -158,10 +154,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
        if (user_n_regs < kernel_n_regs)
                return -EINVAL;
 
-       if (!access_ok(u64_to_user_ptr(user_regs_ptr),
-                      2 * sizeof(u32) * kernel_n_regs))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -170,6 +162,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
                                                u64 user_regs_ptr,
                                                u32 *user_n_regs)
 {
+       u32 __user *p = u64_to_user_ptr(user_regs_ptr);
        u32 r;
 
        if (*user_n_regs == 0) {
@@ -179,25 +172,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
 
        *user_n_regs = kernel_n_regs;
 
-       for (r = 0; r < kernel_n_regs; r++) {
-               u32 __user *user_reg_ptr =
-                       u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
-               u32 __user *user_val_ptr =
-                       u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
-                                       sizeof(u32));
-               int ret;
-
-               ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
-                                user_reg_ptr);
-               if (ret)
-                       return -EFAULT;
+       if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+               return -EFAULT;
 
-               ret = __put_user(kernel_regs[r].value, user_val_ptr);
-               if (ret)
-                       return -EFAULT;
+       for (r = 0; r < kernel_n_regs; r++, p += 2) {
+               unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+                               p, Efault);
+               unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
        }
-
+       user_write_access_end();
        return 0;
+Efault:
+       user_write_access_end();
+       return -EFAULT;
 }
 
 static int query_perf_config_data(struct drm_i915_private *i915,
@@ -233,10 +220,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
                return -EINVAL;
        }
 
-       if (!access_ok(user_query_config_ptr, total_size))
-               return -EFAULT;
-
-       if (__get_user(flags, &user_query_config_ptr->flags))
+       if (get_user(flags, &user_query_config_ptr->flags))
                return -EFAULT;
 
        if (flags != 0)
@@ -249,7 +233,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
                BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
 
                memset(&uuid, 0, sizeof(uuid));
-               if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+               if (copy_from_user(uuid, user_query_config_ptr->uuid,
                                     sizeof(user_query_config_ptr->uuid)))
                        return -EFAULT;
 
@@ -263,7 +247,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
                }
                rcu_read_unlock();
        } else {
-               if (__get_user(config_id, &user_query_config_ptr->config))
+               if (get_user(config_id, &user_query_config_ptr->config))
                        return -EFAULT;
 
                oa_config = i915_perf_get_oa_config(perf, config_id);
@@ -271,8 +255,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
        if (!oa_config)
                return -ENOENT;
 
-       if (__copy_from_user(&user_config, user_config_ptr,
-                            sizeof(user_config))) {
+       if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }
@@ -318,8 +301,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
 
        memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
 
-       if (__copy_to_user(user_config_ptr, &user_config,
-                          sizeof(user_config))) {
+       if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
                ret = -EFAULT;
                goto out;
        }
index 6c076a2..7717581 100644 (file)
@@ -186,7 +186,7 @@ typedef struct {
 
 #define INVALID_MMIO_REG _MMIO(0)
 
-static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
+static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
 {
        return reg.reg;
 }
index 7ad3f06..00ca35f 100644 (file)
 #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE      3
 
 #define SUN4I_HDMI_DDC_CLK_REG         0x528
-#define SUN4I_HDMI_DDC_CLK_M(m)                        (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m)                        (((m) & 0xf) << 3)
 #define SUN4I_HDMI_DDC_CLK_N(n)                        ((n) & 0x7)
 
 #define SUN4I_HDMI_DDC_LINE_CTRL_REG   0x540
index 2ff7801..12430b9 100644 (file)
@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
        unsigned long best_rate = 0;
        u8 best_m = 0, best_n = 0, _m, _n;
 
-       for (_m = 0; _m < 8; _m++) {
+       for (_m = 0; _m < 16; _m++) {
                for (_n = 0; _n < 8; _n++) {
                        unsigned long tmp_rate;
 
index b2ad319..6f1fe72 100644 (file)
@@ -387,8 +387,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
                                input_report_abs(hdata->input,
                                        ABS_MT_PRESSURE, z);
                        } else {
-                               input_mt_report_slot_state(hdata->input,
-                                       MT_TOOL_FINGER, 0);
+                               input_mt_report_slot_inactive(hdata->input);
                        }
                }
 
index 35c8c17..3f94b49 100644 (file)
@@ -899,7 +899,7 @@ static void mt_release_pending_palms(struct mt_device *td,
                clear_bit(slotnum, app->pending_palm_slots);
 
                input_mt_slot(input, slotnum);
-               input_mt_report_slot_state(input, MT_TOOL_PALM, false);
+               input_mt_report_slot_inactive(input);
 
                need_sync = true;
        }
@@ -1643,9 +1643,7 @@ static void mt_release_contacts(struct hid_device *hid)
                if (mt) {
                        for (i = 0; i < mt->num_slots; i++) {
                                input_mt_slot(input_dev, i);
-                               input_mt_report_slot_state(input_dev,
-                                                          MT_TOOL_FINGER,
-                                                          false);
+                               input_mt_report_slot_inactive(input_dev);
                        }
                        input_mt_sync_frame(input_dev);
                        input_sync(input_dev);
index 0d57e51..e494295 100644 (file)
@@ -282,7 +282,8 @@ static void evdev_pass_values(struct evdev_client *client,
        spin_unlock(&client->buffer_lock);
 
        if (wakeup)
-               wake_up_interruptible(&evdev->wait);
+               wake_up_interruptible_poll(&evdev->wait,
+                       EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
 }
 
 /*
@@ -429,7 +430,7 @@ static void evdev_hangup(struct evdev *evdev)
                kill_fasync(&client->fasync, SIGIO, POLL_HUP);
        spin_unlock(&evdev->client_lock);
 
-       wake_up_interruptible(&evdev->wait);
+       wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
 }
 
 static int evdev_release(struct inode *inode, struct file *file)
@@ -945,7 +946,7 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
        client->revoked = true;
        evdev_ungrab(evdev, client);
        input_flush_device(&evdev->handle, file);
-       wake_up_interruptible(&evdev->wait);
+       wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
 
        return 0;
 }
index 940b744..6f73f02 100644 (file)
@@ -45,6 +45,7 @@ config JOYSTICK_A3D
 config JOYSTICK_ADI
        tristate "Logitech ADI digital joysticks and gamepads"
        select GAMEPORT
+       depends on ADI!=m # avoid module name conflict
        help
          Say Y here if you have a Logitech controller using the ADI
          protocol over the PC gameport.
index 28de965..793ecbb 100644 (file)
@@ -701,7 +701,7 @@ config KEYBOARD_SPEAR
          Say Y here if you want to use the SPEAR keyboard.
 
          To compile this driver as a module, choose M here: the
-         module will be called spear-keboard.
+         module will be called spear-keyboard.
 
 config KEYBOARD_TC3589X
        tristate "TC3589X Keypad support"
index 7e3eae5..6ec2826 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/libps2.h>
 #include <linux/mutex.h>
 #include <linux/dmi.h>
+#include <linux/property.h>
 
 #define DRIVER_DESC    "AT and PS/2 keyboard driver"
 
@@ -63,6 +64,11 @@ static bool atkbd_terminal;
 module_param_named(terminal, atkbd_terminal, bool, 0);
 MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
 
+#define MAX_FUNCTION_ROW_KEYS  24
+
+#define SCANCODE(keymap)       ((keymap >> 16) & 0xFFFF)
+#define KEYCODE(keymap)                (keymap & 0xFFFF)
+
 /*
  * Scancode to keycode tables. These are just the default setting, and
  * are loadable via a userland utility.
@@ -230,6 +236,9 @@ struct atkbd {
 
        /* Serializes reconnect(), attr->set() and event work */
        struct mutex mutex;
+
+       u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
+       int num_function_row_keys;
 };
 
 /*
@@ -283,6 +292,7 @@ static struct device_attribute atkbd_attr_##_name =                         \
        __ATTR(_name, S_IRUGO, atkbd_do_show_##_name, NULL);
 
 ATKBD_DEFINE_RO_ATTR(err_count);
+ATKBD_DEFINE_RO_ATTR(function_row_physmap);
 
 static struct attribute *atkbd_attributes[] = {
        &atkbd_attr_extra.attr,
@@ -292,11 +302,42 @@ static struct attribute *atkbd_attributes[] = {
        &atkbd_attr_softrepeat.attr,
        &atkbd_attr_softraw.attr,
        &atkbd_attr_err_count.attr,
+       &atkbd_attr_function_row_physmap.attr,
        NULL
 };
 
+static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
+{
+       ssize_t size = 0;
+       int i;
+
+       if (!atkbd->num_function_row_keys)
+               return 0;
+
+       for (i = 0; i < atkbd->num_function_row_keys; i++)
+               size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
+                                 atkbd->function_row_physmap[i]);
+       size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+       return size;
+}
+
+static umode_t atkbd_attr_is_visible(struct kobject *kobj,
+                               struct attribute *attr, int i)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct serio *serio = to_serio_port(dev);
+       struct atkbd *atkbd = serio_get_drvdata(serio);
+
+       if (attr == &atkbd_attr_function_row_physmap.attr &&
+           !atkbd->num_function_row_keys)
+               return 0;
+
+       return attr->mode;
+}
+
 static struct attribute_group atkbd_attribute_group = {
        .attrs  = atkbd_attributes,
+       .is_visible = atkbd_attr_is_visible,
 };
 
 static const unsigned int xl_table[] = {
@@ -994,6 +1035,39 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
        return code;
 }
 
+static int atkbd_get_keymap_from_fwnode(struct atkbd *atkbd)
+{
+       struct device *dev = &atkbd->ps2dev.serio->dev;
+       int i, n;
+       u32 *ptr;
+       u16 scancode, keycode;
+
+       /* Parse "linux,keymap" property */
+       n = device_property_count_u32(dev, "linux,keymap");
+       if (n <= 0 || n > ATKBD_KEYMAP_SIZE)
+               return -ENXIO;
+
+       ptr = kcalloc(n, sizeof(u32), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       if (device_property_read_u32_array(dev, "linux,keymap", ptr, n)) {
+               dev_err(dev, "problem parsing FW keymap property\n");
+               kfree(ptr);
+               return -EINVAL;
+       }
+
+       memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
+       for (i = 0; i < n; i++) {
+               scancode = SCANCODE(ptr[i]);
+               keycode = KEYCODE(ptr[i]);
+               atkbd->keycode[scancode] = keycode;
+       }
+
+       kfree(ptr);
+       return 0;
+}
+
 /*
  * atkbd_set_keycode_table() initializes keyboard's keycode table
  * according to the selected scancode set
@@ -1001,13 +1075,16 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
 
 static void atkbd_set_keycode_table(struct atkbd *atkbd)
 {
+       struct device *dev = &atkbd->ps2dev.serio->dev;
        unsigned int scancode;
        int i, j;
 
        memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
        bitmap_zero(atkbd->force_release_mask, ATKBD_KEYMAP_SIZE);
 
-       if (atkbd->translated) {
+       if (!atkbd_get_keymap_from_fwnode(atkbd)) {
+               dev_dbg(dev, "Using FW keymap\n");
+       } else if (atkbd->translated) {
                for (i = 0; i < 128; i++) {
                        scancode = atkbd_unxlate_table[i];
                        atkbd->keycode[i] = atkbd_set2_keycode[scancode];
@@ -1121,6 +1198,22 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd)
        }
 }
 
+static void atkbd_parse_fwnode_data(struct serio *serio)
+{
+       struct atkbd *atkbd = serio_get_drvdata(serio);
+       struct device *dev = &serio->dev;
+       int n;
+
+       /* Parse "function-row-physmap" property */
+       n = device_property_count_u32(dev, "function-row-physmap");
+       if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+           !device_property_read_u32_array(dev, "function-row-physmap",
+                                           atkbd->function_row_physmap, n)) {
+               atkbd->num_function_row_keys = n;
+               dev_dbg(dev, "FW reported %d function-row key locations\n", n);
+       }
+}
+
 /*
  * atkbd_connect() is called when the serio module finds an interface
  * that isn't handled yet by an appropriate device driver. We check if
@@ -1184,6 +1277,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
                atkbd->id = 0xab00;
        }
 
+       atkbd_parse_fwnode_data(serio);
+
        atkbd_set_keycode_table(atkbd);
        atkbd_set_device_attrs(atkbd);
 
index 9f809ae..d18839f 100644 (file)
@@ -99,6 +99,15 @@ static void imx_sc_check_for_events(struct work_struct *work)
                                      msecs_to_jiffies(REPEAT_INTERVAL));
 }
 
+static void imx_sc_key_action(void *data)
+{
+       struct imx_key_drv_data *priv = data;
+
+       imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
+       imx_scu_irq_unregister_notifier(&priv->key_notifier);
+       cancel_delayed_work_sync(&priv->check_work);
+}
+
 static int imx_sc_key_probe(struct platform_device *pdev)
 {
        struct imx_key_drv_data *priv;
@@ -149,27 +158,16 @@ static int imx_sc_key_probe(struct platform_device *pdev)
                return error;
        }
 
+       error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+       if (error)
+               return error;
+
        priv->key_notifier.notifier_call = imx_sc_key_notify;
        error = imx_scu_irq_register_notifier(&priv->key_notifier);
-       if (error) {
-               imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
-                                        false);
+       if (error)
                dev_err(&pdev->dev, "failed to register scu notifier\n");
-               return error;
-       }
-
-       return 0;
-}
-
-static int imx_sc_key_remove(struct platform_device *pdev)
-{
-       struct imx_key_drv_data *priv = platform_get_drvdata(pdev);
-
-       imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
-       imx_scu_irq_unregister_notifier(&priv->key_notifier);
-       cancel_delayed_work_sync(&priv->check_work);
 
-       return 0;
+       return error;
 }
 
 static const struct of_device_id imx_sc_key_ids[] = {
@@ -184,7 +182,6 @@ static struct platform_driver imx_sc_key_driver = {
                .of_match_table = imx_sc_key_ids,
        },
        .probe = imx_sc_key_probe,
-       .remove = imx_sc_key_remove,
 };
 module_platform_driver(imx_sc_key_driver);
 
index 2175876..9b0f966 100644 (file)
@@ -374,5 +374,5 @@ static void __exit tca6416_keypad_exit(void)
 module_exit(tca6416_keypad_exit);
 
 MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
-MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander");
+MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
 MODULE_LICENSE("GPL");
index 7e2e658..362e8a0 100644 (file)
@@ -117,16 +117,6 @@ config INPUT_E3X0_BUTTON
          To compile this driver as a module, choose M here: the
          module will be called e3x0_button.
 
-config INPUT_MSM_VIBRATOR
-       tristate "Qualcomm MSM vibrator driver"
-       select INPUT_FF_MEMLESS
-       help
-         Support for the vibrator that is found on various Qualcomm MSM
-         SOCs.
-
-         To compile this driver as a module, choose M here: the module
-         will be called msm_vibrator.
-
 config INPUT_PCSPKR
        tristate "PC Speaker support"
        depends on PCSPKR_PLATFORM
@@ -265,17 +255,6 @@ config INPUT_APANEL
         To compile this driver as a module, choose M here: the module will
         be called apanel.
 
-config INPUT_GP2A
-       tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
-       depends on I2C
-       depends on GPIOLIB || COMPILE_TEST
-       help
-         Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
-         hooked to an I2C bus.
-
-         To compile this driver as a module, choose M here: the
-         module will be called gp2ap002a00f.
-
 config INPUT_GPIO_BEEPER
        tristate "Generic GPIO Beeper support"
        depends on GPIOLIB || COMPILE_TEST
@@ -739,6 +718,17 @@ config INPUT_IMS_PCU
          To compile this driver as a module, choose M here: the module will be
          called ims_pcu.
 
+config INPUT_IQS269A
+       tristate "Azoteq IQS269A capacitive touch controller"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Say Y to enable support for the Azoteq IQS269A capacitive
+         touch controller.
+
+         To compile this driver as a module, choose M here: the
+         module will be called iqs269a.
+
 config INPUT_CMA3000
        tristate "VTI CMA3000 Tri-axis accelerometer"
        help
index 8fd187f..a48e5f2 100644 (file)
@@ -33,13 +33,13 @@ obj-$(CONFIG_INPUT_E3X0_BUTTON)             += e3x0-button.o
 obj-$(CONFIG_INPUT_DRV260X_HAPTICS)    += drv260x.o
 obj-$(CONFIG_INPUT_DRV2665_HAPTICS)    += drv2665.o
 obj-$(CONFIG_INPUT_DRV2667_HAPTICS)    += drv2667.o
-obj-$(CONFIG_INPUT_GP2A)               += gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_BEEPER)                += gpio-beeper.o
 obj-$(CONFIG_INPUT_GPIO_DECODER)       += gpio_decoder.o
 obj-$(CONFIG_INPUT_GPIO_VIBRA)         += gpio-vibra.o
 obj-$(CONFIG_INPUT_HISI_POWERKEY)      += hisi_powerkey.o
 obj-$(CONFIG_HP_SDC_RTC)               += hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)            += ims-pcu.o
+obj-$(CONFIG_INPUT_IQS269A)            += iqs269a.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)      += ixp4xx-beeper.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)     += keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)              += kxtj9.o
@@ -50,7 +50,6 @@ obj-$(CONFIG_INPUT_MAX8925_ONKEY)     += max8925_onkey.o
 obj-$(CONFIG_INPUT_MAX8997_HAPTIC)     += max8997_haptic.o
 obj-$(CONFIG_INPUT_MC13783_PWRBUTTON)  += mc13783-pwrbutton.o
 obj-$(CONFIG_INPUT_MMA8450)            += mma8450.o
-obj-$(CONFIG_INPUT_MSM_VIBRATOR)       += msm-vibrator.o
 obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON)   += palmas-pwrbutton.o
 obj-$(CONFIG_INPUT_PCAP)               += pcap_keys.o
 obj-$(CONFIG_INPUT_PCF50633_PMU)       += pcf50633-input.o
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
deleted file mode 100644 (file)
index 90abda8..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
- *
- * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
- * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/input/gp2ap002a00f.h>
-
-struct gp2a_data {
-       struct input_dev *input;
-       const struct gp2a_platform_data *pdata;
-       struct i2c_client *i2c_client;
-};
-
-enum gp2a_addr {
-       GP2A_ADDR_PROX  = 0x0,
-       GP2A_ADDR_GAIN  = 0x1,
-       GP2A_ADDR_HYS   = 0x2,
-       GP2A_ADDR_CYCLE = 0x3,
-       GP2A_ADDR_OPMOD = 0x4,
-       GP2A_ADDR_CON   = 0x6
-};
-
-enum gp2a_controls {
-       /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
-       GP2A_CTRL_SSD   = 0x01
-};
-
-static int gp2a_report(struct gp2a_data *dt)
-{
-       int vo = gpio_get_value(dt->pdata->vout_gpio);
-
-       input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
-       input_sync(dt->input);
-
-       return 0;
-}
-
-static irqreturn_t gp2a_irq(int irq, void *handle)
-{
-       struct gp2a_data *dt = handle;
-
-       gp2a_report(dt);
-
-       return IRQ_HANDLED;
-}
-
-static int gp2a_enable(struct gp2a_data *dt)
-{
-       return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
-                                        GP2A_CTRL_SSD);
-}
-
-static int gp2a_disable(struct gp2a_data *dt)
-{
-       return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
-                                        0x00);
-}
-
-static int gp2a_device_open(struct input_dev *dev)
-{
-       struct gp2a_data *dt = input_get_drvdata(dev);
-       int error;
-
-       error = gp2a_enable(dt);
-       if (error < 0) {
-               dev_err(&dt->i2c_client->dev,
-                       "unable to activate, err %d\n", error);
-               return error;
-       }
-
-       gp2a_report(dt);
-
-       return 0;
-}
-
-static void gp2a_device_close(struct input_dev *dev)
-{
-       struct gp2a_data *dt = input_get_drvdata(dev);
-       int error;
-
-       error = gp2a_disable(dt);
-       if (error < 0)
-               dev_err(&dt->i2c_client->dev,
-                       "unable to deactivate, err %d\n", error);
-}
-
-static int gp2a_initialize(struct gp2a_data *dt)
-{
-       int error;
-
-       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
-                                         0x08);
-       if (error < 0)
-               return error;
-
-       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
-                                         0xc2);
-       if (error < 0)
-               return error;
-
-       error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
-                                         0x04);
-       if (error < 0)
-               return error;
-
-       error = gp2a_disable(dt);
-
-       return error;
-}
-
-static int gp2a_probe(struct i2c_client *client,
-                               const struct i2c_device_id *id)
-{
-       const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev);
-       struct gp2a_data *dt;
-       int error;
-
-       if (!pdata)
-               return -EINVAL;
-
-       if (pdata->hw_setup) {
-               error = pdata->hw_setup(client);
-               if (error < 0)
-                       return error;
-       }
-
-       error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
-       if (error)
-               goto err_hw_shutdown;
-
-       dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
-       if (!dt) {
-               error = -ENOMEM;
-               goto err_free_gpio;
-       }
-
-       dt->pdata = pdata;
-       dt->i2c_client = client;
-
-       error = gp2a_initialize(dt);
-       if (error < 0)
-               goto err_free_mem;
-
-       dt->input = input_allocate_device();
-       if (!dt->input) {
-               error = -ENOMEM;
-               goto err_free_mem;
-       }
-
-       input_set_drvdata(dt->input, dt);
-
-       dt->input->open = gp2a_device_open;
-       dt->input->close = gp2a_device_close;
-       dt->input->name = GP2A_I2C_NAME;
-       dt->input->id.bustype = BUS_I2C;
-       dt->input->dev.parent = &client->dev;
-
-       input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
-
-       error = request_threaded_irq(client->irq, NULL, gp2a_irq,
-                       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
-                               IRQF_ONESHOT,
-                       GP2A_I2C_NAME, dt);
-       if (error) {
-               dev_err(&client->dev, "irq request failed\n");
-               goto err_free_input_dev;
-       }
-
-       error = input_register_device(dt->input);
-       if (error) {
-               dev_err(&client->dev, "device registration failed\n");
-               goto err_free_irq;
-       }
-
-       device_init_wakeup(&client->dev, pdata->wakeup);
-       i2c_set_clientdata(client, dt);
-
-       return 0;
-
-err_free_irq:
-       free_irq(client->irq, dt);
-err_free_input_dev:
-       input_free_device(dt->input);
-err_free_mem:
-       kfree(dt);
-err_free_gpio:
-       gpio_free(pdata->vout_gpio);
-err_hw_shutdown:
-       if (pdata->hw_shutdown)
-               pdata->hw_shutdown(client);
-       return error;
-}
-
-static int gp2a_remove(struct i2c_client *client)
-{
-       struct gp2a_data *dt = i2c_get_clientdata(client);
-       const struct gp2a_platform_data *pdata = dt->pdata;
-
-       free_irq(client->irq, dt);
-
-       input_unregister_device(dt->input);
-       kfree(dt);
-
-       gpio_free(pdata->vout_gpio);
-
-       if (pdata->hw_shutdown)
-               pdata->hw_shutdown(client);
-
-       return 0;
-}
-
-static int __maybe_unused gp2a_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct gp2a_data *dt = i2c_get_clientdata(client);
-       int retval = 0;
-
-       if (device_may_wakeup(&client->dev)) {
-               enable_irq_wake(client->irq);
-       } else {
-               mutex_lock(&dt->input->mutex);
-               if (dt->input->users)
-                       retval = gp2a_disable(dt);
-               mutex_unlock(&dt->input->mutex);
-       }
-
-       return retval;
-}
-
-static int __maybe_unused gp2a_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct gp2a_data *dt = i2c_get_clientdata(client);
-       int retval = 0;
-
-       if (device_may_wakeup(&client->dev)) {
-               disable_irq_wake(client->irq);
-       } else {
-               mutex_lock(&dt->input->mutex);
-               if (dt->input->users)
-                       retval = gp2a_enable(dt);
-               mutex_unlock(&dt->input->mutex);
-       }
-
-       return retval;
-}
-
-static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
-
-static const struct i2c_device_id gp2a_i2c_id[] = {
-       { GP2A_I2C_NAME, 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
-
-static struct i2c_driver gp2a_i2c_driver = {
-       .driver = {
-               .name   = GP2A_I2C_NAME,
-               .pm     = &gp2a_pm,
-       },
-       .probe          = gp2a_probe,
-       .remove         = gp2a_remove,
-       .id_table       = gp2a_i2c_id,
-};
-
-module_i2c_driver(gp2a_i2c_driver);
-
-MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
-MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
new file mode 100644 (file)
index 0000000..6699eb1
--- /dev/null
@@ -0,0 +1,1833 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS269A Capacitive Touch Controller
+ *
+ * Copyright (C) 2020 Jeff LaBundy <jeff@labundy.com>
+ *
+ * This driver registers up to 3 input devices: one representing capacitive or
+ * inductive keys as well as Hall-effect switches, and one for each of the two
+ * axial sliders presented by the device.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define IQS269_VER_INFO                                0x00
+#define IQS269_VER_INFO_PROD_NUM               0x4F
+
+#define IQS269_SYS_FLAGS                       0x02
+#define IQS269_SYS_FLAGS_SHOW_RESET            BIT(15)
+#define IQS269_SYS_FLAGS_PWR_MODE_MASK         GENMASK(12, 11)
+#define IQS269_SYS_FLAGS_PWR_MODE_SHIFT                11
+#define IQS269_SYS_FLAGS_IN_ATI                        BIT(10)
+
+#define IQS269_CHx_COUNTS                      0x08
+
+#define IQS269_SLIDER_X                                0x30
+
+#define IQS269_CAL_DATA_A                      0x35
+#define IQS269_CAL_DATA_A_HALL_BIN_L_MASK      GENMASK(15, 12)
+#define IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT     12
+#define IQS269_CAL_DATA_A_HALL_BIN_R_MASK      GENMASK(11, 8)
+#define IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT     8
+
+#define IQS269_SYS_SETTINGS                    0x80
+#define IQS269_SYS_SETTINGS_CLK_DIV            BIT(15)
+#define IQS269_SYS_SETTINGS_ULP_AUTO           BIT(14)
+#define IQS269_SYS_SETTINGS_DIS_AUTO           BIT(13)
+#define IQS269_SYS_SETTINGS_PWR_MODE_MASK      GENMASK(12, 11)
+#define IQS269_SYS_SETTINGS_PWR_MODE_SHIFT     11
+#define IQS269_SYS_SETTINGS_PWR_MODE_MAX       3
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK    GENMASK(10, 8)
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT   8
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX     7
+#define IQS269_SYS_SETTINGS_RESEED_OFFSET      BIT(6)
+#define IQS269_SYS_SETTINGS_EVENT_MODE         BIT(5)
+#define IQS269_SYS_SETTINGS_EVENT_MODE_LP      BIT(4)
+#define IQS269_SYS_SETTINGS_REDO_ATI           BIT(2)
+#define IQS269_SYS_SETTINGS_ACK_RESET          BIT(0)
+
+#define IQS269_FILT_STR_LP_LTA_MASK            GENMASK(7, 6)
+#define IQS269_FILT_STR_LP_LTA_SHIFT           6
+#define IQS269_FILT_STR_LP_CNT_MASK            GENMASK(5, 4)
+#define IQS269_FILT_STR_LP_CNT_SHIFT           4
+#define IQS269_FILT_STR_NP_LTA_MASK            GENMASK(3, 2)
+#define IQS269_FILT_STR_NP_LTA_SHIFT           2
+#define IQS269_FILT_STR_NP_CNT_MASK            GENMASK(1, 0)
+#define IQS269_FILT_STR_MAX                    3
+
+#define IQS269_EVENT_MASK_SYS                  BIT(6)
+#define IQS269_EVENT_MASK_DEEP                 BIT(2)
+#define IQS269_EVENT_MASK_TOUCH                        BIT(1)
+#define IQS269_EVENT_MASK_PROX                 BIT(0)
+
+#define IQS269_RATE_NP_MS_MAX                  255
+#define IQS269_RATE_LP_MS_MAX                  255
+#define IQS269_RATE_ULP_MS_MAX                 4080
+#define IQS269_TIMEOUT_PWR_MS_MAX              130560
+#define IQS269_TIMEOUT_LTA_MS_MAX              130560
+
+#define IQS269_MISC_A_ATI_BAND_DISABLE         BIT(15)
+#define IQS269_MISC_A_ATI_LP_ONLY              BIT(14)
+#define IQS269_MISC_A_ATI_BAND_TIGHTEN         BIT(13)
+#define IQS269_MISC_A_FILT_DISABLE             BIT(12)
+#define IQS269_MISC_A_GPIO3_SELECT_MASK                GENMASK(10, 8)
+#define IQS269_MISC_A_GPIO3_SELECT_SHIFT       8
+#define IQS269_MISC_A_DUAL_DIR                 BIT(6)
+#define IQS269_MISC_A_TX_FREQ_MASK             GENMASK(5, 4)
+#define IQS269_MISC_A_TX_FREQ_SHIFT            4
+#define IQS269_MISC_A_TX_FREQ_MAX              3
+#define IQS269_MISC_A_GLOBAL_CAP_SIZE          BIT(0)
+
+#define IQS269_MISC_B_RESEED_UI_SEL_MASK       GENMASK(7, 6)
+#define IQS269_MISC_B_RESEED_UI_SEL_SHIFT      6
+#define IQS269_MISC_B_RESEED_UI_SEL_MAX                3
+#define IQS269_MISC_B_TRACKING_UI_ENABLE       BIT(4)
+#define IQS269_MISC_B_FILT_STR_SLIDER          GENMASK(1, 0)
+
+#define IQS269_CHx_SETTINGS                    0x8C
+
+#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE         BIT(15)
+#define IQS269_CHx_ENG_A_RX_GND_INACTIVE       BIT(13)
+#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE                BIT(12)
+#define IQS269_CHx_ENG_A_ATI_MODE_MASK         GENMASK(9, 8)
+#define IQS269_CHx_ENG_A_ATI_MODE_SHIFT                8
+#define IQS269_CHx_ENG_A_ATI_MODE_MAX          3
+#define IQS269_CHx_ENG_A_INV_LOGIC             BIT(7)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MASK                GENMASK(6, 5)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT       5
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MAX         3
+#define IQS269_CHx_ENG_A_SENSE_MODE_MASK       GENMASK(3, 0)
+#define IQS269_CHx_ENG_A_SENSE_MODE_MAX                15
+
+#define IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE      BIT(13)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MASK       GENMASK(10, 9)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT      9
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MAX                3
+#define IQS269_CHx_ENG_B_STATIC_ENABLE         BIT(8)
+#define IQS269_CHx_ENG_B_ATI_BASE_MASK         GENMASK(7, 6)
+#define IQS269_CHx_ENG_B_ATI_BASE_75           0x00
+#define IQS269_CHx_ENG_B_ATI_BASE_100          0x40
+#define IQS269_CHx_ENG_B_ATI_BASE_150          0x80
+#define IQS269_CHx_ENG_B_ATI_BASE_200          0xC0
+#define IQS269_CHx_ENG_B_ATI_TARGET_MASK       GENMASK(5, 0)
+#define IQS269_CHx_ENG_B_ATI_TARGET_MAX                2016
+
+#define IQS269_CHx_WEIGHT_MAX                  255
+#define IQS269_CHx_THRESH_MAX                  255
+#define IQS269_CHx_HYST_DEEP_MASK              GENMASK(7, 4)
+#define IQS269_CHx_HYST_DEEP_SHIFT             4
+#define IQS269_CHx_HYST_TOUCH_MASK             GENMASK(3, 0)
+#define IQS269_CHx_HYST_MAX                    15
+
+#define IQS269_CHx_HALL_INACTIVE               6
+#define IQS269_CHx_HALL_ACTIVE                 7
+
+#define IQS269_HALL_PAD_R                      BIT(0)
+#define IQS269_HALL_PAD_L                      BIT(1)
+#define IQS269_HALL_PAD_INV                    BIT(6)
+
+#define IQS269_HALL_UI                         0xF5
+#define IQS269_HALL_UI_ENABLE                  BIT(15)
+
+#define IQS269_MAX_REG                         0xFF
+
+#define IQS269_NUM_CH                          8
+#define IQS269_NUM_SL                          2
+
+#define IQS269_ATI_POLL_SLEEP_US               (iqs269->delay_mult * 10000)
+#define IQS269_ATI_POLL_TIMEOUT_US             (iqs269->delay_mult * 500000)
+#define IQS269_ATI_STABLE_DELAY_MS             (iqs269->delay_mult * 150)
+
+#define IQS269_PWR_MODE_POLL_SLEEP_US          IQS269_ATI_POLL_SLEEP_US
+#define IQS269_PWR_MODE_POLL_TIMEOUT_US                IQS269_ATI_POLL_TIMEOUT_US
+
+#define iqs269_irq_wait()                      usleep_range(100, 150)
+
+enum iqs269_local_cap_size {
+       IQS269_LOCAL_CAP_SIZE_0,
+       IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY,
+       IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5,
+};
+
+enum iqs269_st_offs {
+       IQS269_ST_OFFS_PROX,
+       IQS269_ST_OFFS_DIR,
+       IQS269_ST_OFFS_TOUCH,
+       IQS269_ST_OFFS_DEEP,
+};
+
+enum iqs269_th_offs {
+       IQS269_TH_OFFS_PROX,
+       IQS269_TH_OFFS_TOUCH,
+       IQS269_TH_OFFS_DEEP,
+};
+
+enum iqs269_event_id {
+       IQS269_EVENT_PROX_DN,
+       IQS269_EVENT_PROX_UP,
+       IQS269_EVENT_TOUCH_DN,
+       IQS269_EVENT_TOUCH_UP,
+       IQS269_EVENT_DEEP_DN,
+       IQS269_EVENT_DEEP_UP,
+};
+
+struct iqs269_switch_desc {
+       unsigned int code;
+       bool enabled;
+};
+
+struct iqs269_event_desc {
+       const char *name;
+       enum iqs269_st_offs st_offs;
+       enum iqs269_th_offs th_offs;
+       bool dir_up;
+       u8 mask;
+};
+
+static const struct iqs269_event_desc iqs269_events[] = {
+       [IQS269_EVENT_PROX_DN] = {
+               .name = "event-prox",
+               .st_offs = IQS269_ST_OFFS_PROX,
+               .th_offs = IQS269_TH_OFFS_PROX,
+               .mask = IQS269_EVENT_MASK_PROX,
+       },
+       [IQS269_EVENT_PROX_UP] = {
+               .name = "event-prox-alt",
+               .st_offs = IQS269_ST_OFFS_PROX,
+               .th_offs = IQS269_TH_OFFS_PROX,
+               .dir_up = true,
+               .mask = IQS269_EVENT_MASK_PROX,
+       },
+       [IQS269_EVENT_TOUCH_DN] = {
+               .name = "event-touch",
+               .st_offs = IQS269_ST_OFFS_TOUCH,
+               .th_offs = IQS269_TH_OFFS_TOUCH,
+               .mask = IQS269_EVENT_MASK_TOUCH,
+       },
+       [IQS269_EVENT_TOUCH_UP] = {
+               .name = "event-touch-alt",
+               .st_offs = IQS269_ST_OFFS_TOUCH,
+               .th_offs = IQS269_TH_OFFS_TOUCH,
+               .dir_up = true,
+               .mask = IQS269_EVENT_MASK_TOUCH,
+       },
+       [IQS269_EVENT_DEEP_DN] = {
+               .name = "event-deep",
+               .st_offs = IQS269_ST_OFFS_DEEP,
+               .th_offs = IQS269_TH_OFFS_DEEP,
+               .mask = IQS269_EVENT_MASK_DEEP,
+       },
+       [IQS269_EVENT_DEEP_UP] = {
+               .name = "event-deep-alt",
+               .st_offs = IQS269_ST_OFFS_DEEP,
+               .th_offs = IQS269_TH_OFFS_DEEP,
+               .dir_up = true,
+               .mask = IQS269_EVENT_MASK_DEEP,
+       },
+};
+
+struct iqs269_ver_info {
+       u8 prod_num;
+       u8 sw_num;
+       u8 hw_num;
+       u8 padding;
+} __packed;
+
+struct iqs269_sys_reg {
+       __be16 general;
+       u8 active;
+       u8 filter;
+       u8 reseed;
+       u8 event_mask;
+       u8 rate_np;
+       u8 rate_lp;
+       u8 rate_ulp;
+       u8 timeout_pwr;
+       u8 timeout_rdy;
+       u8 timeout_lta;
+       __be16 misc_a;
+       __be16 misc_b;
+       u8 blocking;
+       u8 padding;
+       u8 slider_select[IQS269_NUM_SL];
+       u8 timeout_tap;
+       u8 timeout_swipe;
+       u8 thresh_swipe;
+       u8 redo_ati;
+} __packed;
+
+struct iqs269_ch_reg {
+       u8 rx_enable;
+       u8 tx_enable;
+       __be16 engine_a;
+       __be16 engine_b;
+       __be16 ati_comp;
+       u8 thresh[3];
+       u8 hyst;
+       u8 assoc_select;
+       u8 assoc_weight;
+} __packed;
+
+struct iqs269_flags {
+       __be16 system;
+       u8 gesture;
+       u8 padding;
+       u8 states[4];
+} __packed;
+
+struct iqs269_private {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       struct mutex lock;
+       struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
+       struct iqs269_ch_reg ch_reg[IQS269_NUM_CH];
+       struct iqs269_sys_reg sys_reg;
+       struct input_dev *keypad;
+       struct input_dev *slider[IQS269_NUM_SL];
+       unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
+       unsigned int suspend_mode;
+       unsigned int delay_mult;
+       unsigned int ch_num;
+       bool hall_enable;
+       bool ati_current;
+};
+
+static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
+                              unsigned int ch_num, unsigned int mode)
+{
+       u16 engine_a;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+
+       engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+
+       engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK;
+       engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+       iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a);
+       iqs269->ati_current = false;
+
+       mutex_unlock(&iqs269->lock);
+
+       return 0;
+}
+
+static int iqs269_ati_mode_get(struct iqs269_private *iqs269,
+                              unsigned int ch_num, unsigned int *mode)
+{
+       u16 engine_a;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+       engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+       mutex_unlock(&iqs269->lock);
+
+       engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK;
+       *mode = (engine_a >> IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+       return 0;
+}
+
+static int iqs269_ati_base_set(struct iqs269_private *iqs269,
+                              unsigned int ch_num, unsigned int base)
+{
+       u16 engine_b;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       switch (base) {
+       case 75:
+               base = IQS269_CHx_ENG_B_ATI_BASE_75;
+               break;
+
+       case 100:
+               base = IQS269_CHx_ENG_B_ATI_BASE_100;
+               break;
+
+       case 150:
+               base = IQS269_CHx_ENG_B_ATI_BASE_150;
+               break;
+
+       case 200:
+               base = IQS269_CHx_ENG_B_ATI_BASE_200;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&iqs269->lock);
+
+       engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+       engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK;
+       engine_b |= base;
+
+       iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+       iqs269->ati_current = false;
+
+       mutex_unlock(&iqs269->lock);
+
+       return 0;
+}
+
+static int iqs269_ati_base_get(struct iqs269_private *iqs269,
+                              unsigned int ch_num, unsigned int *base)
+{
+       u16 engine_b;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+       engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+       mutex_unlock(&iqs269->lock);
+
+       switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) {
+       case IQS269_CHx_ENG_B_ATI_BASE_75:
+               *base = 75;
+               return 0;
+
+       case IQS269_CHx_ENG_B_ATI_BASE_100:
+               *base = 100;
+               return 0;
+
+       case IQS269_CHx_ENG_B_ATI_BASE_150:
+               *base = 150;
+               return 0;
+
+       case IQS269_CHx_ENG_B_ATI_BASE_200:
+               *base = 200;
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int iqs269_ati_target_set(struct iqs269_private *iqs269,
+                                unsigned int ch_num, unsigned int target)
+{
+       u16 engine_b;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       if (target > IQS269_CHx_ENG_B_ATI_TARGET_MAX)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+
+       engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+       engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK;
+       engine_b |= target / 32;
+
+       iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+       iqs269->ati_current = false;
+
+       mutex_unlock(&iqs269->lock);
+
+       return 0;
+}
+
+static int iqs269_ati_target_get(struct iqs269_private *iqs269,
+                                unsigned int ch_num, unsigned int *target)
+{
+       u16 engine_b;
+
+       if (ch_num >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+       engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+       mutex_unlock(&iqs269->lock);
+
+       *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32;
+
+       return 0;
+}
+
+static int iqs269_parse_mask(const struct fwnode_handle *fwnode,
+                            const char *propname, u8 *mask)
+{
+       unsigned int val[IQS269_NUM_CH];
+       int count, error, i;
+
+       count = fwnode_property_count_u32(fwnode, propname);
+       if (count < 0)
+               return 0;
+
+       if (count > IQS269_NUM_CH)
+               return -EINVAL;
+
+       error = fwnode_property_read_u32_array(fwnode, propname, val, count);
+       if (error)
+               return error;
+
+       *mask = 0;
+
+       for (i = 0; i < count; i++) {
+               if (val[i] >= IQS269_NUM_CH)
+                       return -EINVAL;
+
+               *mask |= BIT(val[i]);
+       }
+
+       return 0;
+}
+
+static int iqs269_parse_chan(struct iqs269_private *iqs269,
+                            const struct fwnode_handle *ch_node)
+{
+       struct i2c_client *client = iqs269->client;
+       struct fwnode_handle *ev_node;
+       struct iqs269_ch_reg *ch_reg;
+       u16 engine_a, engine_b;
+       unsigned int reg, val;
+       int error, i;
+
+       error = fwnode_property_read_u32(ch_node, "reg", &reg);
+       if (error) {
+               dev_err(&client->dev, "Failed to read channel number: %d\n",
+                       error);
+               return error;
+       } else if (reg >= IQS269_NUM_CH) {
+               dev_err(&client->dev, "Invalid channel number: %u\n", reg);
+               return -EINVAL;
+       }
+
+       iqs269->sys_reg.active |= BIT(reg);
+       if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
+               iqs269->sys_reg.reseed |= BIT(reg);
+
+       if (fwnode_property_present(ch_node, "azoteq,blocking-enable"))
+               iqs269->sys_reg.blocking |= BIT(reg);
+
+       if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
+               iqs269->sys_reg.slider_select[0] |= BIT(reg);
+
+       if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
+               iqs269->sys_reg.slider_select[1] |= BIT(reg);
+
+       ch_reg = &iqs269->ch_reg[reg];
+
+       error = regmap_raw_read(iqs269->regmap,
+                               IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2,
+                               ch_reg, sizeof(*ch_reg));
+       if (error)
+               return error;
+
+       error = iqs269_parse_mask(ch_node, "azoteq,rx-enable",
+                                 &ch_reg->rx_enable);
+       if (error) {
+               dev_err(&client->dev, "Invalid channel %u RX enable mask: %d\n",
+                       reg, error);
+               return error;
+       }
+
+       error = iqs269_parse_mask(ch_node, "azoteq,tx-enable",
+                                 &ch_reg->tx_enable);
+       if (error) {
+               dev_err(&client->dev, "Invalid channel %u TX enable mask: %d\n",
+                       reg, error);
+               return error;
+       }
+
+       engine_a = be16_to_cpu(ch_reg->engine_a);
+       engine_b = be16_to_cpu(ch_reg->engine_b);
+
+       engine_a |= IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+       if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
+               engine_a &= ~IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+
+       engine_a |= IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+       if (fwnode_property_present(ch_node, "azoteq,rx-float-inactive"))
+               engine_a &= ~IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+
+       engine_a &= ~IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+       engine_b &= ~IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+       if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size", &val)) {
+               switch (val) {
+               case IQS269_LOCAL_CAP_SIZE_0:
+                       break;
+
+               case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
+                       engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+
+                       /* fall through */
+
+               case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
+                       engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+                       break;
+
+               default:
+                       dev_err(&client->dev,
+                               "Invalid channel %u local cap. size: %u\n", reg,
+                               val);
+                       return -EINVAL;
+               }
+       }
+
+       engine_a &= ~IQS269_CHx_ENG_A_INV_LOGIC;
+       if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
+               engine_a |= IQS269_CHx_ENG_A_INV_LOGIC;
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
+               if (val > IQS269_CHx_ENG_A_PROJ_BIAS_MAX) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u bias current: %u\n", reg,
+                               val);
+                       return -EINVAL;
+               }
+
+               engine_a &= ~IQS269_CHx_ENG_A_PROJ_BIAS_MASK;
+               engine_a |= (val << IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT);
+       }
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
+               if (val > IQS269_CHx_ENG_A_SENSE_MODE_MAX) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u sensing mode: %u\n", reg,
+                               val);
+                       return -EINVAL;
+               }
+
+               engine_a &= ~IQS269_CHx_ENG_A_SENSE_MODE_MASK;
+               engine_a |= val;
+       }
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
+               if (val > IQS269_CHx_ENG_B_SENSE_FREQ_MAX) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u sensing frequency: %u\n",
+                               reg, val);
+                       return -EINVAL;
+               }
+
+               engine_b &= ~IQS269_CHx_ENG_B_SENSE_FREQ_MASK;
+               engine_b |= (val << IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT);
+       }
+
+       engine_b &= ~IQS269_CHx_ENG_B_STATIC_ENABLE;
+       if (fwnode_property_present(ch_node, "azoteq,static-enable"))
+               engine_b |= IQS269_CHx_ENG_B_STATIC_ENABLE;
+
+       ch_reg->engine_a = cpu_to_be16(engine_a);
+       ch_reg->engine_b = cpu_to_be16(engine_b);
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
+               error = iqs269_ati_mode_set(iqs269, reg, val);
+               if (error) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u ATI mode: %u\n", reg, val);
+                       return error;
+               }
+       }
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
+               error = iqs269_ati_base_set(iqs269, reg, val);
+               if (error) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u ATI base: %u\n", reg, val);
+                       return error;
+               }
+       }
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
+               error = iqs269_ati_target_set(iqs269, reg, val);
+               if (error) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u ATI target: %u\n", reg,
+                               val);
+                       return error;
+               }
+       }
+
+       error = iqs269_parse_mask(ch_node, "azoteq,assoc-select",
+                                 &ch_reg->assoc_select);
+       if (error) {
+               dev_err(&client->dev, "Invalid channel %u association: %d\n",
+                       reg, error);
+               return error;
+       }
+
+       if (!fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val)) {
+               if (val > IQS269_CHx_WEIGHT_MAX) {
+                       dev_err(&client->dev,
+                               "Invalid channel %u associated weight: %u\n",
+                               reg, val);
+                       return -EINVAL;
+               }
+
+               ch_reg->assoc_weight = val;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+               ev_node = fwnode_get_named_child_node(ch_node,
+                                                     iqs269_events[i].name);
+               if (!ev_node)
+                       continue;
+
+               if (!fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
+                       if (val > IQS269_CHx_THRESH_MAX) {
+                               dev_err(&client->dev,
+                                       "Invalid channel %u threshold: %u\n",
+                                       reg, val);
+                               return -EINVAL;
+                       }
+
+                       ch_reg->thresh[iqs269_events[i].th_offs] = val;
+               }
+
+               if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
+                       u8 *hyst = &ch_reg->hyst;
+
+                       if (val > IQS269_CHx_HYST_MAX) {
+                               dev_err(&client->dev,
+                                       "Invalid channel %u hysteresis: %u\n",
+                                       reg, val);
+                               return -EINVAL;
+                       }
+
+                       if (i == IQS269_EVENT_DEEP_DN ||
+                           i == IQS269_EVENT_DEEP_UP) {
+                               *hyst &= ~IQS269_CHx_HYST_DEEP_MASK;
+                               *hyst |= (val << IQS269_CHx_HYST_DEEP_SHIFT);
+                       } else if (i == IQS269_EVENT_TOUCH_DN ||
+                                  i == IQS269_EVENT_TOUCH_UP) {
+                               *hyst &= ~IQS269_CHx_HYST_TOUCH_MASK;
+                               *hyst |= val;
+                       }
+               }
+
+               if (fwnode_property_read_u32(ev_node, "linux,code", &val))
+                       continue;
+
+               switch (reg) {
+               case IQS269_CHx_HALL_ACTIVE:
+                       if (iqs269->hall_enable) {
+                               iqs269->switches[i].code = val;
+                               iqs269->switches[i].enabled = true;
+                       }
+
+                       /* fall through */
+
+               case IQS269_CHx_HALL_INACTIVE:
+                       if (iqs269->hall_enable)
+                               break;
+
+                       /* fall through */
+
+               default:
+                       iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
+               }
+
+               iqs269->sys_reg.event_mask &= ~iqs269_events[i].mask;
+       }
+
+       return 0;
+}
+
+/*
+ * Parse firmware (device tree/ACPI) properties and build the system
+ * register cache that iqs269_dev_init() later writes to the device.
+ *
+ * Returns 0 on success, or a negative error code if any property holds an
+ * out-of-range value.
+ */
+static int iqs269_parse_prop(struct iqs269_private *iqs269)
+{
+       struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+       struct i2c_client *client = iqs269->client;
+       struct fwnode_handle *ch_node;
+       u16 general, misc_a, misc_b;
+       unsigned int val;
+       int error;
+
+       iqs269->hall_enable = device_property_present(&client->dev,
+                                                     "azoteq,hall-enable");
+
+       if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
+                                     &val)) {
+               if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) {
+                       dev_err(&client->dev, "Invalid suspend mode: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               iqs269->suspend_mode = val;
+       }
+
+       /* Seed the register cache with the device's current settings. */
+       error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+                               sizeof(*sys_reg));
+       if (error)
+               return error;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-lta",
+                                     &val)) {
+               if (val > IQS269_FILT_STR_MAX) {
+                       dev_err(&client->dev, "Invalid filter strength: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               sys_reg->filter &= ~IQS269_FILT_STR_LP_LTA_MASK;
+               sys_reg->filter |= (val << IQS269_FILT_STR_LP_LTA_SHIFT);
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-cnt",
+                                     &val)) {
+               if (val > IQS269_FILT_STR_MAX) {
+                       dev_err(&client->dev, "Invalid filter strength: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               sys_reg->filter &= ~IQS269_FILT_STR_LP_CNT_MASK;
+               sys_reg->filter |= (val << IQS269_FILT_STR_LP_CNT_SHIFT);
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-lta",
+                                     &val)) {
+               if (val > IQS269_FILT_STR_MAX) {
+                       dev_err(&client->dev, "Invalid filter strength: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               sys_reg->filter &= ~IQS269_FILT_STR_NP_LTA_MASK;
+               sys_reg->filter |= (val << IQS269_FILT_STR_NP_LTA_SHIFT);
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-cnt",
+                                     &val)) {
+               if (val > IQS269_FILT_STR_MAX) {
+                       dev_err(&client->dev, "Invalid filter strength: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               sys_reg->filter &= ~IQS269_FILT_STR_NP_CNT_MASK;
+               sys_reg->filter |= val;
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
+                                     &val)) {
+               if (val > IQS269_RATE_NP_MS_MAX) {
+                       dev_err(&client->dev, "Invalid report rate: %u\n", val);
+                       return -EINVAL;
+               }
+
+               sys_reg->rate_np = val;
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
+                                     &val)) {
+               if (val > IQS269_RATE_LP_MS_MAX) {
+                       dev_err(&client->dev, "Invalid report rate: %u\n", val);
+                       return -EINVAL;
+               }
+
+               sys_reg->rate_lp = val;
+       }
+
+       /* The ULP rate register counts in units of 16 ms. */
+       if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
+                                     &val)) {
+               if (val > IQS269_RATE_ULP_MS_MAX) {
+                       dev_err(&client->dev, "Invalid report rate: %u\n", val);
+                       return -EINVAL;
+               }
+
+               sys_reg->rate_ulp = val / 16;
+       }
+
+       /* Both timeout registers count in units of 512 ms. */
+       if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
+                                     &val)) {
+               if (val > IQS269_TIMEOUT_PWR_MS_MAX) {
+                       dev_err(&client->dev, "Invalid timeout: %u\n", val);
+                       return -EINVAL;
+               }
+
+               sys_reg->timeout_pwr = val / 512;
+       }
+
+       if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
+                                     &val)) {
+               if (val > IQS269_TIMEOUT_LTA_MS_MAX) {
+                       dev_err(&client->dev, "Invalid timeout: %u\n", val);
+                       return -EINVAL;
+               }
+
+               sys_reg->timeout_lta = val / 512;
+       }
+
+       /* The MISC registers are stored big-endian; modify them host-endian. */
+       misc_a = be16_to_cpu(sys_reg->misc_a);
+       misc_b = be16_to_cpu(sys_reg->misc_b);
+
+       misc_a &= ~IQS269_MISC_A_ATI_BAND_DISABLE;
+       if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
+               misc_a |= IQS269_MISC_A_ATI_BAND_DISABLE;
+
+       misc_a &= ~IQS269_MISC_A_ATI_LP_ONLY;
+       if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
+               misc_a |= IQS269_MISC_A_ATI_LP_ONLY;
+
+       misc_a &= ~IQS269_MISC_A_ATI_BAND_TIGHTEN;
+       if (device_property_present(&client->dev, "azoteq,ati-band-tighten"))
+               misc_a |= IQS269_MISC_A_ATI_BAND_TIGHTEN;
+
+       misc_a &= ~IQS269_MISC_A_FILT_DISABLE;
+       if (device_property_present(&client->dev, "azoteq,filt-disable"))
+               misc_a |= IQS269_MISC_A_FILT_DISABLE;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
+                                     &val)) {
+               if (val >= IQS269_NUM_CH) {
+                       dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               misc_a &= ~IQS269_MISC_A_GPIO3_SELECT_MASK;
+               misc_a |= (val << IQS269_MISC_A_GPIO3_SELECT_SHIFT);
+       }
+
+       misc_a &= ~IQS269_MISC_A_DUAL_DIR;
+       if (device_property_present(&client->dev, "azoteq,dual-direction"))
+               misc_a |= IQS269_MISC_A_DUAL_DIR;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,tx-freq", &val)) {
+               if (val > IQS269_MISC_A_TX_FREQ_MAX) {
+                       dev_err(&client->dev,
+                               "Invalid excitation frequency: %u\n", val);
+                       return -EINVAL;
+               }
+
+               misc_a &= ~IQS269_MISC_A_TX_FREQ_MASK;
+               misc_a |= (val << IQS269_MISC_A_TX_FREQ_SHIFT);
+       }
+
+       misc_a &= ~IQS269_MISC_A_GLOBAL_CAP_SIZE;
+       if (device_property_present(&client->dev, "azoteq,global-cap-increase"))
+               misc_a |= IQS269_MISC_A_GLOBAL_CAP_SIZE;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
+                                     &val)) {
+               if (val > IQS269_MISC_B_RESEED_UI_SEL_MAX) {
+                       dev_err(&client->dev, "Invalid reseed selection: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               misc_b &= ~IQS269_MISC_B_RESEED_UI_SEL_MASK;
+               misc_b |= (val << IQS269_MISC_B_RESEED_UI_SEL_SHIFT);
+       }
+
+       misc_b &= ~IQS269_MISC_B_TRACKING_UI_ENABLE;
+       if (device_property_present(&client->dev, "azoteq,tracking-enable"))
+               misc_b |= IQS269_MISC_B_TRACKING_UI_ENABLE;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,filt-str-slider",
+                                     &val)) {
+               if (val > IQS269_FILT_STR_MAX) {
+                       dev_err(&client->dev, "Invalid filter strength: %u\n",
+                               val);
+                       return -EINVAL;
+               }
+
+               misc_b &= ~IQS269_MISC_B_FILT_STR_SLIDER;
+               misc_b |= val;
+       }
+
+       sys_reg->misc_a = cpu_to_be16(misc_a);
+       sys_reg->misc_b = cpu_to_be16(misc_b);
+
+       /*
+        * Clear the fields that iqs269_parse_chan() is expected to repopulate
+        * for each child node below.
+        */
+       sys_reg->active = 0;
+       sys_reg->reseed = 0;
+
+       sys_reg->blocking = 0;
+
+       sys_reg->slider_select[0] = 0;
+       sys_reg->slider_select[1] = 0;
+
+       /*
+        * Mask all events except system events; channel parsing unmasks the
+        * events that carry a linux,code property.
+        */
+       sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
+
+       device_for_each_child_node(&client->dev, ch_node) {
+               error = iqs269_parse_chan(iqs269, ch_node);
+               if (error) {
+                       fwnode_handle_put(ch_node);
+                       return error;
+               }
+       }
+
+       /*
+        * Volunteer all active channels to participate in ATI when REDO-ATI is
+        * manually triggered.
+        */
+       sys_reg->redo_ati = sys_reg->active;
+
+       general = be16_to_cpu(sys_reg->general);
+
+       if (device_property_present(&client->dev, "azoteq,clk-div")) {
+               general |= IQS269_SYS_SETTINGS_CLK_DIV;
+               iqs269->delay_mult = 4;
+       } else {
+               general &= ~IQS269_SYS_SETTINGS_CLK_DIV;
+               iqs269->delay_mult = 1;
+       }
+
+       /*
+        * Configure the device to automatically switch between normal and low-
+        * power modes as a function of sensing activity. Ultra-low-power mode,
+        * if enabled, is reserved for suspend.
+        */
+       general &= ~IQS269_SYS_SETTINGS_ULP_AUTO;
+       general &= ~IQS269_SYS_SETTINGS_DIS_AUTO;
+       general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK;
+
+       if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
+                                     &val)) {
+               if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) {
+                       dev_err(&client->dev, "Invalid update rate: %u\n", val);
+                       return -EINVAL;
+               }
+
+               general &= ~IQS269_SYS_SETTINGS_ULP_UPDATE_MASK;
+               general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
+       }
+
+       general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
+       if (device_property_present(&client->dev, "azoteq,reseed-offset"))
+               general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;
+
+       general |= IQS269_SYS_SETTINGS_EVENT_MODE;
+
+       /*
+        * As per the datasheet, enable streaming during normal-power mode if
+        * either slider is in use. In that case, the device returns to event
+        * mode during low-power mode.
+        */
+       if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
+               general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;
+
+       general |= IQS269_SYS_SETTINGS_REDO_ATI;
+       general |= IQS269_SYS_SETTINGS_ACK_RESET;
+
+       sys_reg->general = cpu_to_be16(general);
+
+       return 0;
+}
+
+/*
+ * Push the cached register values to the device and wait for ATI to
+ * complete.
+ *
+ * Takes iqs269->lock and sleeps; must not be called from atomic context.
+ * Returns 0 on success or a negative error code.
+ */
+static int iqs269_dev_init(struct iqs269_private *iqs269)
+{
+       struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+       struct iqs269_ch_reg *ch_reg;
+       unsigned int val;
+       int error, i;
+
+       mutex_lock(&iqs269->lock);
+
+       error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
+                                  IQS269_HALL_UI_ENABLE,
+                                  iqs269->hall_enable ? ~0 : 0);
+       if (error)
+               goto err_mutex;
+
+       /* Write per-channel settings for the active channels only. */
+       for (i = 0; i < IQS269_NUM_CH; i++) {
+               if (!(sys_reg->active & BIT(i)))
+                       continue;
+
+               ch_reg = &iqs269->ch_reg[i];
+
+               /* Registers hold 16-bit values, hence the byte size / 2. */
+               error = regmap_raw_write(iqs269->regmap,
+                                        IQS269_CHx_SETTINGS + i *
+                                        sizeof(*ch_reg) / 2, ch_reg,
+                                        sizeof(*ch_reg));
+               if (error)
+                       goto err_mutex;
+       }
+
+       /*
+        * The REDO-ATI and ATI channel selection fields must be written in the
+        * same block write, so every field between registers 0x80 through 0x8B
+        * (inclusive) must be written as well.
+        */
+       error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+                                sizeof(*sys_reg));
+       if (error)
+               goto err_mutex;
+
+       /* Poll until the device reports that ATI has finished. */
+       error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+                                        !(val & IQS269_SYS_FLAGS_IN_ATI),
+                                        IQS269_ATI_POLL_SLEEP_US,
+                                        IQS269_ATI_POLL_TIMEOUT_US);
+       if (error)
+               goto err_mutex;
+
+       msleep(IQS269_ATI_STABLE_DELAY_MS);
+       iqs269->ati_current = true;
+
+err_mutex:
+       mutex_unlock(&iqs269->lock);
+
+       return error;
+}
+
+/*
+ * Allocate and register the keypad (and optional slider) input devices,
+ * and report the initial state of any hall-effect switches.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int iqs269_input_init(struct iqs269_private *iqs269)
+{
+       struct i2c_client *client = iqs269->client;
+       struct iqs269_flags flags = { 0 };
+       unsigned int sw_code, keycode;
+       int error, i, j;
+       u8 dir_mask, state;
+
+       iqs269->keypad = devm_input_allocate_device(&client->dev);
+       if (!iqs269->keypad)
+               return -ENOMEM;
+
+       iqs269->keypad->keycodemax = ARRAY_SIZE(iqs269->keycode);
+       iqs269->keypad->keycode = iqs269->keycode;
+       iqs269->keypad->keycodesize = sizeof(*iqs269->keycode);
+
+       iqs269->keypad->name = "iqs269a_keypad";
+       iqs269->keypad->id.bustype = BUS_I2C;
+
+       /*
+        * The flags are only fetched for the hall-effect case; the zero
+        * initialization above keeps the unconditional reads of flags.states
+        * in the loop below well-defined otherwise.
+        */
+       if (iqs269->hall_enable) {
+               error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS,
+                                       &flags, sizeof(flags));
+               if (error) {
+                       dev_err(&client->dev,
+                               "Failed to read initial status: %d\n", error);
+                       return error;
+               }
+       }
+
+       for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+               dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+               if (!iqs269_events[i].dir_up)
+                       dir_mask = ~dir_mask;
+
+               state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+               sw_code = iqs269->switches[i].code;
+
+               for (j = 0; j < IQS269_NUM_CH; j++) {
+                       keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+                       /*
+                        * Hall-effect sensing repurposes a pair of dedicated
+                        * channels, only one of which reports events.
+                        */
+                       switch (j) {
+                       case IQS269_CHx_HALL_ACTIVE:
+                               if (iqs269->hall_enable &&
+                                   iqs269->switches[i].enabled) {
+                                       input_set_capability(iqs269->keypad,
+                                                            EV_SW, sw_code);
+                                       input_report_switch(iqs269->keypad,
+                                                           sw_code,
+                                                           state & BIT(j));
+                               }
+
+                               /* fall through */
+
+                       case IQS269_CHx_HALL_INACTIVE:
+                               if (iqs269->hall_enable)
+                                       continue;
+
+                               /* fall through */
+
+                       default:
+                               if (keycode != KEY_RESERVED)
+                                       input_set_capability(iqs269->keypad,
+                                                            EV_KEY, keycode);
+                       }
+               }
+       }
+
+       input_sync(iqs269->keypad);
+
+       error = input_register_device(iqs269->keypad);
+       if (error) {
+               dev_err(&client->dev, "Failed to register keypad: %d\n", error);
+               return error;
+       }
+
+       for (i = 0; i < IQS269_NUM_SL; i++) {
+               if (!iqs269->sys_reg.slider_select[i])
+                       continue;
+
+               iqs269->slider[i] = devm_input_allocate_device(&client->dev);
+               if (!iqs269->slider[i])
+                       return -ENOMEM;
+
+               iqs269->slider[i]->name = i ? "iqs269a_slider_1"
+                                           : "iqs269a_slider_0";
+               iqs269->slider[i]->id.bustype = BUS_I2C;
+
+               input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
+               input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);
+
+               error = input_register_device(iqs269->slider[i]);
+               if (error) {
+                       dev_err(&client->dev,
+                               "Failed to register slider %d: %d\n", i, error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Read the device status and forward slider, key and switch events to the
+ * input subsystem. Re-initializes the device if it reports an unexpected
+ * reset.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int iqs269_report(struct iqs269_private *iqs269)
+{
+       struct i2c_client *client = iqs269->client;
+       struct iqs269_flags flags;
+       unsigned int sw_code, keycode;
+       int error, i, j;
+       u8 slider_x[IQS269_NUM_SL];
+       u8 dir_mask, state;
+
+       error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, &flags,
+                               sizeof(flags));
+       if (error) {
+               dev_err(&client->dev, "Failed to read device status: %d\n",
+                       error);
+               return error;
+       }
+
+       /*
+        * The device resets itself if its own watchdog bites, which can happen
+        * in the event of an I2C communication error. In this case, the device
+        * asserts a SHOW_RESET interrupt and all registers must be restored.
+        */
+       if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_SHOW_RESET) {
+               dev_err(&client->dev, "Unexpected device reset\n");
+
+               error = iqs269_dev_init(iqs269);
+               if (error)
+                       dev_err(&client->dev,
+                               "Failed to re-initialize device: %d\n", error);
+
+               return error;
+       }
+
+       error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
+                               sizeof(slider_x));
+       if (error) {
+               dev_err(&client->dev, "Failed to read slider position: %d\n",
+                       error);
+               return error;
+       }
+
+       for (i = 0; i < IQS269_NUM_SL; i++) {
+               if (!iqs269->sys_reg.slider_select[i])
+                       continue;
+
+               /*
+                * Report BTN_TOUCH if any channel that participates in the
+                * slider is in a state of touch.
+                */
+               if (flags.states[IQS269_ST_OFFS_TOUCH] &
+                   iqs269->sys_reg.slider_select[i]) {
+                       input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
+                       input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
+               } else {
+                       input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
+               }
+
+               input_sync(iqs269->slider[i]);
+       }
+
+       /* Walk each event type and report the per-channel key/switch states. */
+       for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+               dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+               if (!iqs269_events[i].dir_up)
+                       dir_mask = ~dir_mask;
+
+               state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+               sw_code = iqs269->switches[i].code;
+
+               for (j = 0; j < IQS269_NUM_CH; j++) {
+                       keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+                       switch (j) {
+                       case IQS269_CHx_HALL_ACTIVE:
+                               if (iqs269->hall_enable &&
+                                   iqs269->switches[i].enabled)
+                                       input_report_switch(iqs269->keypad,
+                                                           sw_code,
+                                                           state & BIT(j));
+
+                               /* fall through */
+
+                       case IQS269_CHx_HALL_INACTIVE:
+                               if (iqs269->hall_enable)
+                                       continue;
+
+                               /* fall through */
+
+                       default:
+                               input_report_key(iqs269->keypad, keycode,
+                                                state & BIT(j));
+                       }
+               }
+       }
+
+       input_sync(iqs269->keypad);
+
+       return 0;
+}
+
+/* RDY interrupt handler; returns IRQ_NONE if event reporting failed. */
+static irqreturn_t iqs269_irq(int irq, void *context)
+{
+       struct iqs269_private *iqs269 = context;
+
+       if (iqs269_report(iqs269))
+               return IRQ_NONE;
+
+       /*
+        * The device does not deassert its interrupt (RDY) pin until shortly
+        * after receiving an I2C stop condition; the following delay ensures
+        * the interrupt handler does not return before this time.
+        */
+       iqs269_irq_wait();
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Show the raw counts of the currently selected channel.
+ *
+ * Returns -EPERM if ATI results are not current or if hall-effect sensing
+ * is enabled.
+ */
+static ssize_t counts_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct i2c_client *client = iqs269->client;
+       __le16 counts;
+       int error;
+
+       if (!iqs269->ati_current || iqs269->hall_enable)
+               return -EPERM;
+
+       /*
+        * Unsolicited I2C communication prompts the device to assert its RDY
+        * pin, so disable the interrupt line until the operation is finished
+        * and RDY has been deasserted.
+        */
+       disable_irq(client->irq);
+
+       /* Each channel's counts occupy one 16-bit register (2 bytes). */
+       error = regmap_raw_read(iqs269->regmap,
+                               IQS269_CHx_COUNTS + iqs269->ch_num * 2,
+                               &counts, sizeof(counts));
+
+       iqs269_irq_wait();
+       enable_irq(client->irq);
+
+       if (error)
+               return error;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
+}
+
+/*
+ * Show the hall-effect calibration bin read from CAL_DATA_A, selecting the
+ * left or right field based on which pad the hall channel pair has enabled.
+ * Returns -EINVAL if the enabled pads match neither known layout.
+ */
+static ssize_t hall_bin_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct i2c_client *client = iqs269->client;
+       unsigned int val;
+       int error;
+
+       /* As in counts_show(), keep the IRQ disabled while reading. */
+       disable_irq(client->irq);
+
+       error = regmap_read(iqs269->regmap, IQS269_CAL_DATA_A, &val);
+
+       iqs269_irq_wait();
+       enable_irq(client->irq);
+
+       if (error)
+               return error;
+
+       switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable &
+               iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) {
+       case IQS269_HALL_PAD_R:
+               val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK;
+               val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT;
+               break;
+
+       case IQS269_HALL_PAD_L:
+               val &= IQS269_CAL_DATA_A_HALL_BIN_L_MASK;
+               val >>= IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+/* Show whether hall-effect sensing is currently enabled. */
+static ssize_t hall_enable_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int enabled = iqs269->hall_enable;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", enabled);
+}
+
+/*
+ * Enable or disable hall-effect sensing and mark the ATI results stale.
+ */
+static ssize_t hall_enable_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       mutex_lock(&iqs269->lock);
+
+       /* Any nonzero value enables; no upper bound is enforced here. */
+       iqs269->hall_enable = val;
+       iqs269->ati_current = false;
+
+       mutex_unlock(&iqs269->lock);
+
+       return count;
+}
+
+/* Show the channel currently targeted by the other sysfs attributes. */
+static ssize_t ch_number_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int ch_num = iqs269->ch_num;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ch_num);
+}
+
+/*
+ * Select the channel targeted by the other sysfs attributes; rejects
+ * channel numbers beyond the device's channel count.
+ */
+static ssize_t ch_number_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       if (val >= IQS269_NUM_CH)
+               return -EINVAL;
+
+       iqs269->ch_num = val;
+
+       return count;
+}
+
+/* Show the rx_enable mask of the currently selected channel. */
+static ssize_t rx_enable_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct iqs269_ch_reg *ch_reg = &iqs269->ch_reg[iqs269->ch_num];
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ch_reg->rx_enable);
+}
+
+/*
+ * Set the rx_enable mask (eight bits at most) of the selected channel and
+ * mark the ATI results stale.
+ */
+static ssize_t rx_enable_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mutex_lock(&iqs269->lock);
+
+       iqs269->ch_reg[iqs269->ch_num].rx_enable = val;
+       iqs269->ati_current = false;
+
+       mutex_unlock(&iqs269->lock);
+
+       return count;
+}
+
+/* Show the ATI mode of the currently selected channel. */
+static ssize_t ati_mode_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int ati_mode;
+       int error;
+
+       error = iqs269_ati_mode_get(iqs269, iqs269->ch_num, &ati_mode);
+       if (error)
+               return error;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ati_mode);
+}
+
+/*
+ * Set the ATI mode of the selected channel; range checking is performed by
+ * iqs269_ati_mode_set().
+ */
+static ssize_t ati_mode_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       error = iqs269_ati_mode_set(iqs269, iqs269->ch_num, val);
+       if (error)
+               return error;
+
+       return count;
+}
+
+/* Show the ATI base of the currently selected channel. */
+static ssize_t ati_base_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int ati_base;
+       int error;
+
+       error = iqs269_ati_base_get(iqs269, iqs269->ch_num, &ati_base);
+       if (error)
+               return error;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ati_base);
+}
+
+/*
+ * Set the ATI base of the selected channel; range checking is performed by
+ * iqs269_ati_base_set().
+ */
+static ssize_t ati_base_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       error = iqs269_ati_base_set(iqs269, iqs269->ch_num, val);
+       if (error)
+               return error;
+
+       return count;
+}
+
+/* Show the ATI target of the currently selected channel. */
+static ssize_t ati_target_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int ati_target;
+       int error;
+
+       error = iqs269_ati_target_get(iqs269, iqs269->ch_num, &ati_target);
+       if (error)
+               return error;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ati_target);
+}
+
+/*
+ * Set the ATI target of the selected channel; range checking is performed
+ * by iqs269_ati_target_set().
+ */
+static ssize_t ati_target_store(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       error = iqs269_ati_target_set(iqs269, iqs269->ch_num, val);
+       if (error)
+               return error;
+
+       return count;
+}
+
+/* Show whether the cached ATI results are current. */
+static ssize_t ati_trigger_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       unsigned int ati_current = iqs269->ati_current;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", ati_current);
+}
+
+static ssize_t ati_trigger_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct i2c_client *client = iqs269->client;
+       unsigned int val;
+       int error;
+
+       error = kstrtouint(buf, 10, &val);
+       if (error)
+               return error;
+
+       if (!val)
+               return count;
+
+       disable_irq(client->irq);
+
+       error = iqs269_dev_init(iqs269);
+
+       iqs269_irq_wait();
+       enable_irq(client->irq);
+
+       if (error)
+               return error;
+
+       return count;
+}
+
+static DEVICE_ATTR_RO(counts);
+static DEVICE_ATTR_RO(hall_bin);
+static DEVICE_ATTR_RW(hall_enable);
+static DEVICE_ATTR_RW(ch_number);
+static DEVICE_ATTR_RW(rx_enable);
+static DEVICE_ATTR_RW(ati_mode);
+static DEVICE_ATTR_RW(ati_base);
+static DEVICE_ATTR_RW(ati_target);
+static DEVICE_ATTR_RW(ati_trigger);
+
+static struct attribute *iqs269_attrs[] = {
+       &dev_attr_counts.attr,
+       &dev_attr_hall_bin.attr,
+       &dev_attr_hall_enable.attr,
+       &dev_attr_ch_number.attr,
+       &dev_attr_rx_enable.attr,
+       &dev_attr_ati_mode.attr,
+       &dev_attr_ati_base.attr,
+       &dev_attr_ati_target.attr,
+       &dev_attr_ati_trigger.attr,
+       NULL,
+};
+
+static const struct attribute_group iqs269_attr_group = {
+       .attrs = iqs269_attrs,
+};
+
+static const struct regmap_config iqs269_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = IQS269_MAX_REG,
+};
+
+static int iqs269_probe(struct i2c_client *client)
+{
+       struct iqs269_ver_info ver_info;
+       struct iqs269_private *iqs269;
+       int error;
+
+       iqs269 = devm_kzalloc(&client->dev, sizeof(*iqs269), GFP_KERNEL);
+       if (!iqs269)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, iqs269);
+       iqs269->client = client;
+
+       iqs269->regmap = devm_regmap_init_i2c(client, &iqs269_regmap_config);
+       if (IS_ERR(iqs269->regmap)) {
+               error = PTR_ERR(iqs269->regmap);
+               dev_err(&client->dev, "Failed to initialize register map: %d\n",
+                       error);
+               return error;
+       }
+
+       mutex_init(&iqs269->lock);
+
+       error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
+                               sizeof(ver_info));
+       if (error)
+               return error;
+
+       if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
+               dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+                       ver_info.prod_num);
+               return -EINVAL;
+       }
+
+       error = iqs269_parse_prop(iqs269);
+       if (error)
+               return error;
+
+       error = iqs269_dev_init(iqs269);
+       if (error) {
+               dev_err(&client->dev, "Failed to initialize device: %d\n",
+                       error);
+               return error;
+       }
+
+       error = iqs269_input_init(iqs269);
+       if (error)
+               return error;
+
+       error = devm_request_threaded_irq(&client->dev, client->irq,
+                                         NULL, iqs269_irq, IRQF_ONESHOT,
+                                         client->name, iqs269);
+       if (error) {
+               dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+               return error;
+       }
+
+       error = devm_device_add_group(&client->dev, &iqs269_attr_group);
+       if (error)
+               dev_err(&client->dev, "Failed to add attributes: %d\n", error);
+
+       return error;
+}
+
+static int __maybe_unused iqs269_suspend(struct device *dev)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct i2c_client *client = iqs269->client;
+       unsigned int val;
+       int error;
+
+       if (!iqs269->suspend_mode)
+               return 0;
+
+       disable_irq(client->irq);
+
+       /*
+        * Automatic power mode switching must be disabled before the device is
+        * forced into any particular power mode. In this case, the device will
+        * transition into normal-power mode.
+        */
+       error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+                                  IQS269_SYS_SETTINGS_DIS_AUTO, ~0);
+       if (error)
+               goto err_irq;
+
+       /*
+        * The following check ensures the device has completed its transition
+        * into normal-power mode before a manual mode switch is performed.
+        */
+       error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+                                       !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+                                        IQS269_PWR_MODE_POLL_SLEEP_US,
+                                        IQS269_PWR_MODE_POLL_TIMEOUT_US);
+       if (error)
+               goto err_irq;
+
+       error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+                                  IQS269_SYS_SETTINGS_PWR_MODE_MASK,
+                                  iqs269->suspend_mode <<
+                                  IQS269_SYS_SETTINGS_PWR_MODE_SHIFT);
+       if (error)
+               goto err_irq;
+
+       /*
+        * This last check ensures the device has completed its transition into
+        * the desired power mode to prevent any spurious interrupts from being
+        * triggered after iqs269_suspend has already returned.
+        */
+       error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+                                        (val & IQS269_SYS_FLAGS_PWR_MODE_MASK)
+                                        == (iqs269->suspend_mode <<
+                                            IQS269_SYS_FLAGS_PWR_MODE_SHIFT),
+                                        IQS269_PWR_MODE_POLL_SLEEP_US,
+                                        IQS269_PWR_MODE_POLL_TIMEOUT_US);
+
+err_irq:
+       iqs269_irq_wait();
+       enable_irq(client->irq);
+
+       return error;
+}
+
+static int __maybe_unused iqs269_resume(struct device *dev)
+{
+       struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+       struct i2c_client *client = iqs269->client;
+       unsigned int val;
+       int error;
+
+       if (!iqs269->suspend_mode)
+               return 0;
+
+       disable_irq(client->irq);
+
+       error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+                                  IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0);
+       if (error)
+               goto err_irq;
+
+       /*
+        * This check ensures the device has returned to normal-power mode
+        * before automatic power mode switching is re-enabled.
+        */
+       error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+                                       !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+                                        IQS269_PWR_MODE_POLL_SLEEP_US,
+                                        IQS269_PWR_MODE_POLL_TIMEOUT_US);
+       if (error)
+               goto err_irq;
+
+       error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+                                  IQS269_SYS_SETTINGS_DIS_AUTO, 0);
+       if (error)
+               goto err_irq;
+
+       /*
+        * This step reports any events that may have been "swallowed" as a
+        * result of polling PWR_MODE (which automatically acknowledges any
+        * pending interrupts).
+        */
+       error = iqs269_report(iqs269);
+
+err_irq:
+       iqs269_irq_wait();
+       enable_irq(client->irq);
+
+       return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);
+
+static const struct of_device_id iqs269_of_match[] = {
+       { .compatible = "azoteq,iqs269a" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, iqs269_of_match);
+
+static struct i2c_driver iqs269_i2c_driver = {
+       .driver = {
+               .name = "iqs269a",
+               .of_match_table = iqs269_of_match,
+               .pm = &iqs269_pm,
+       },
+       .probe_new = iqs269_probe,
+};
+module_i2c_driver(iqs269_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS269A Capacitive Touch Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/msm-vibrator.c b/drivers/input/misc/msm-vibrator.c
deleted file mode 100644 (file)
index b60f1aa..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Qualcomm MSM vibrator driver
- *
- * Copyright (c) 2018 Brian Masney <masneyb@onstation.org>
- *
- * Based on qcom,pwm-vibrator.c from:
- * Copyright (c) 2018 Jonathan Marek <jonathan@marek.ca>
- *
- * Based on msm_pwm_vibrator.c from downstream Android sources:
- * Copyright (C) 2009-2014 LGE, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#define REG_CMD_RCGR           0x00
-#define REG_CFG_RCGR           0x04
-#define REG_M                  0x08
-#define REG_N                  0x0C
-#define REG_D                  0x10
-#define REG_CBCR               0x24
-#define MMSS_CC_M_DEFAULT      1
-
-struct msm_vibrator {
-       struct input_dev *input;
-       struct mutex mutex;
-       struct work_struct worker;
-       void __iomem *base;
-       struct regulator *vcc;
-       struct clk *clk;
-       struct gpio_desc *enable_gpio;
-       u16 magnitude;
-       bool enabled;
-};
-
-static void msm_vibrator_write(struct msm_vibrator *vibrator, int offset,
-                              u32 value)
-{
-       writel(value, vibrator->base + offset);
-}
-
-static int msm_vibrator_start(struct msm_vibrator *vibrator)
-{
-       int d_reg_val, ret = 0;
-
-       mutex_lock(&vibrator->mutex);
-
-       if (!vibrator->enabled) {
-               ret = clk_set_rate(vibrator->clk, 24000);
-               if (ret) {
-                       dev_err(&vibrator->input->dev,
-                               "Failed to set clock rate: %d\n", ret);
-                       goto unlock;
-               }
-
-               ret = clk_prepare_enable(vibrator->clk);
-               if (ret) {
-                       dev_err(&vibrator->input->dev,
-                               "Failed to enable clock: %d\n", ret);
-                       goto unlock;
-               }
-
-               ret = regulator_enable(vibrator->vcc);
-               if (ret) {
-                       dev_err(&vibrator->input->dev,
-                               "Failed to enable regulator: %d\n", ret);
-                       clk_disable(vibrator->clk);
-                       goto unlock;
-               }
-
-               gpiod_set_value_cansleep(vibrator->enable_gpio, 1);
-
-               vibrator->enabled = true;
-       }
-
-       d_reg_val = 127 - ((126 * vibrator->magnitude) / 0xffff);
-       msm_vibrator_write(vibrator, REG_CFG_RCGR,
-                          (2 << 12) | /* dual edge mode */
-                          (0 << 8) |  /* cxo */
-                          (7 << 0));
-       msm_vibrator_write(vibrator, REG_M, 1);
-       msm_vibrator_write(vibrator, REG_N, 128);
-       msm_vibrator_write(vibrator, REG_D, d_reg_val);
-       msm_vibrator_write(vibrator, REG_CMD_RCGR, 1);
-       msm_vibrator_write(vibrator, REG_CBCR, 1);
-
-unlock:
-       mutex_unlock(&vibrator->mutex);
-
-       return ret;
-}
-
-static void msm_vibrator_stop(struct msm_vibrator *vibrator)
-{
-       mutex_lock(&vibrator->mutex);
-
-       if (vibrator->enabled) {
-               gpiod_set_value_cansleep(vibrator->enable_gpio, 0);
-               regulator_disable(vibrator->vcc);
-               clk_disable(vibrator->clk);
-               vibrator->enabled = false;
-       }
-
-       mutex_unlock(&vibrator->mutex);
-}
-
-static void msm_vibrator_worker(struct work_struct *work)
-{
-       struct msm_vibrator *vibrator = container_of(work,
-                                                    struct msm_vibrator,
-                                                    worker);
-
-       if (vibrator->magnitude)
-               msm_vibrator_start(vibrator);
-       else
-               msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_play_effect(struct input_dev *dev, void *data,
-                                   struct ff_effect *effect)
-{
-       struct msm_vibrator *vibrator = input_get_drvdata(dev);
-
-       mutex_lock(&vibrator->mutex);
-
-       if (effect->u.rumble.strong_magnitude > 0)
-               vibrator->magnitude = effect->u.rumble.strong_magnitude;
-       else
-               vibrator->magnitude = effect->u.rumble.weak_magnitude;
-
-       mutex_unlock(&vibrator->mutex);
-
-       schedule_work(&vibrator->worker);
-
-       return 0;
-}
-
-static void msm_vibrator_close(struct input_dev *input)
-{
-       struct msm_vibrator *vibrator = input_get_drvdata(input);
-
-       cancel_work_sync(&vibrator->worker);
-       msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_probe(struct platform_device *pdev)
-{
-       struct msm_vibrator *vibrator;
-       struct resource *res;
-       int ret;
-
-       vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
-       if (!vibrator)
-               return -ENOMEM;
-
-       vibrator->input = devm_input_allocate_device(&pdev->dev);
-       if (!vibrator->input)
-               return -ENOMEM;
-
-       vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
-       if (IS_ERR(vibrator->vcc)) {
-               if (PTR_ERR(vibrator->vcc) != -EPROBE_DEFER)
-                       dev_err(&pdev->dev, "Failed to get regulator: %ld\n",
-                               PTR_ERR(vibrator->vcc));
-               return PTR_ERR(vibrator->vcc);
-       }
-
-       vibrator->enable_gpio = devm_gpiod_get(&pdev->dev, "enable",
-                                              GPIOD_OUT_LOW);
-       if (IS_ERR(vibrator->enable_gpio)) {
-               if (PTR_ERR(vibrator->enable_gpio) != -EPROBE_DEFER)
-                       dev_err(&pdev->dev, "Failed to get enable gpio: %ld\n",
-                               PTR_ERR(vibrator->enable_gpio));
-               return PTR_ERR(vibrator->enable_gpio);
-       }
-
-       vibrator->clk = devm_clk_get(&pdev->dev, "pwm");
-       if (IS_ERR(vibrator->clk)) {
-               if (PTR_ERR(vibrator->clk) != -EPROBE_DEFER)
-                       dev_err(&pdev->dev, "Failed to lookup pwm clock: %ld\n",
-                               PTR_ERR(vibrator->clk));
-               return PTR_ERR(vibrator->clk);
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
-
-       vibrator->base = devm_ioremap(&pdev->dev, res->start,
-                                    resource_size(res));
-       if (!vibrator->base) {
-               dev_err(&pdev->dev, "Failed to iomap resource.\n");
-               return -ENOMEM;
-       }
-
-       vibrator->enabled = false;
-       mutex_init(&vibrator->mutex);
-       INIT_WORK(&vibrator->worker, msm_vibrator_worker);
-
-       vibrator->input->name = "msm-vibrator";
-       vibrator->input->id.bustype = BUS_HOST;
-       vibrator->input->close = msm_vibrator_close;
-
-       input_set_drvdata(vibrator->input, vibrator);
-       input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
-
-       ret = input_ff_create_memless(vibrator->input, NULL,
-                                     msm_vibrator_play_effect);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to create ff memless: %d", ret);
-               return ret;
-       }
-
-       ret = input_register_device(vibrator->input);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to register input device: %d", ret);
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, vibrator);
-
-       return 0;
-}
-
-static int __maybe_unused msm_vibrator_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
-       cancel_work_sync(&vibrator->worker);
-
-       if (vibrator->enabled)
-               msm_vibrator_stop(vibrator);
-
-       return 0;
-}
-
-static int __maybe_unused msm_vibrator_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
-       if (vibrator->enabled)
-               msm_vibrator_start(vibrator);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(msm_vibrator_pm_ops, msm_vibrator_suspend,
-                        msm_vibrator_resume);
-
-static const struct of_device_id msm_vibrator_of_match[] = {
-       { .compatible = "qcom,msm8226-vibrator" },
-       { .compatible = "qcom,msm8974-vibrator" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, msm_vibrator_of_match);
-
-static struct platform_driver msm_vibrator_driver = {
-       .probe  = msm_vibrator_probe,
-       .driver = {
-               .name = "msm-vibrator",
-               .pm = &msm_vibrator_pm_ops,
-               .of_match_table = of_match_ptr(msm_vibrator_of_match),
-       },
-};
-module_platform_driver(msm_vibrator_driver);
-
-MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
-MODULE_DESCRIPTION("Qualcomm MSM vibrator driver");
-MODULE_LICENSE("GPL");
index 24bc5c5..a1bba72 100644 (file)
@@ -146,7 +146,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
                break;
 
        case XENKBD_MT_EV_UP:
-               input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(info->mtouch);
                break;
 
        case XENKBD_MT_EV_SYN:
index 8719da5..3f9354b 100644 (file)
@@ -938,7 +938,7 @@ static void elan_report_contact(struct elan_tp_data *data,
                input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
        } else {
                input_mt_slot(input, contact_num);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(input);
        }
 }
 
index 7e048b5..7b08ff8 100644 (file)
@@ -945,6 +945,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
        }
        i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
                               sizeof(i8042_kbd_firmware_id));
+       i8042_kbd_fwnode = dev_fwnode(&dev->dev);
 
        /* Keyboard ports are always supposed to be wakeup-enabled */
        device_set_wakeup_enable(&dev->dev, true);
index 20ff2be..0dddf27 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/i8042.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
+#include <linux/property.h>
 
 #include <asm/io.h>
 
@@ -124,6 +125,7 @@ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive da
 static bool i8042_bypass_aux_irq_test;
 static char i8042_kbd_firmware_id[128];
 static char i8042_aux_firmware_id[128];
+static struct fwnode_handle *i8042_kbd_fwnode;
 
 #include "i8042.h"
 
@@ -1335,6 +1337,7 @@ static int __init i8042_create_kbd_port(void)
        strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
        strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
                sizeof(serio->firmware_id));
+       set_primary_fwnode(&serio->dev, i8042_kbd_fwnode);
 
        port->serio = serio;
        port->irq = I8042_KBD_IRQ;
index c071f7c..35c867b 100644 (file)
@@ -201,6 +201,18 @@ config TOUCHSCREEN_CHIPONE_ICN8505
          To compile this driver as a module, choose M here: the
          module will be called chipone_icn8505.
 
+config TOUCHSCREEN_CY8CTMA140
+       tristate "cy8ctma140 touchscreen"
+       depends on I2C
+       help
+         Say Y here if you have a Cypress CY8CTMA140 capacitive
+         touchscreen also just known as "TMA140"
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cy8ctma140.
+
 config TOUCHSCREEN_CY8CTMG110
        tristate "cy8ctmg110 touchscreen"
        depends on I2C
index 94c6162..30d1e1b 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_BU21013)     += bu21013_ts.o
 obj-$(CONFIG_TOUCHSCREEN_BU21029)      += bu21029_ts.o
 obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318)      += chipone_icn8318.o
 obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505)      += chipone_icn8505.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140)   += cy8ctma140.o
 obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110)   += cy8ctmg110_ts.o
 obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE)  += cyttsp_core.o
 obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C)   += cyttsp_i2c.o cyttsp_i2c_common.o
index ae60442..a218973 100644 (file)
@@ -822,8 +822,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
                 * have happened.
                 */
                if (status & MXT_T9_RELEASE) {
-                       input_mt_report_slot_state(input_dev,
-                                                  MT_TOOL_FINGER, 0);
+                       input_mt_report_slot_inactive(input_dev);
                        mxt_input_sync(data);
                }
 
@@ -839,7 +838,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
                input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
        } else {
                /* Touch no longer active, close out slot */
-               input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+               input_mt_report_slot_inactive(input_dev);
        }
 
        data->update_input = true;
@@ -947,7 +946,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
                dev_dbg(dev, "[%u] release\n", id);
 
                /* close out slot */
-               input_mt_report_slot_state(input_dev, 0, 0);
+               input_mt_report_slot_inactive(input_dev);
        }
 
        data->update_input = true;
diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c
new file mode 100644 (file)
index 0000000..a9be291
--- /dev/null
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Cypress CY8CTMA140 (TMA140) touchscreen
+ * (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * (C) 2007 Cypress
+ * (C) 2007 Google, Inc.
+ *
+ * Inspired by the tma140_skomer.c driver in the Samsung GT-S7710 code
+ * drop. The GT-S7710 is codenamed "Skomer", the code also indicates
+ * that the same touchscreen was used in a product called "Lucas".
+ *
+ * The code drop for GT-S7710 also contains a firmware downloader and
+ * 15 (!) versions of the firmware drop from Cypress. But here we assume
+ * the firmware got downloaded to the touchscreen flash successfully and
+ * just use it to read the fingers. The shipped vendor driver does the
+ * same.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/input/mt.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#define CY8CTMA140_NAME                        "cy8ctma140"
+
+#define CY8CTMA140_MAX_FINGERS         4
+
+#define CY8CTMA140_GET_FINGERS         0x00
+#define CY8CTMA140_GET_FW_INFO         0x19
+
+/* This message also fits some bytes for touchkeys, if used */
+#define CY8CTMA140_PACKET_SIZE         31
+
+#define CY8CTMA140_INVALID_BUFFER_BIT  5
+
+struct cy8ctma140 {
+       struct input_dev *input;
+       struct touchscreen_properties props;
+       struct device *dev;
+       struct i2c_client *client;
+       struct regulator_bulk_data regulators[2];
+       u8 prev_fingers;
+       u8 prev_f1id;
+       u8 prev_f2id;
+};
+
+static void cy8ctma140_report(struct cy8ctma140 *ts, u8 *data, int n_fingers)
+{
+       static const u8 contact_offsets[] = { 0x03, 0x09, 0x10, 0x16 };
+       u8 *buf;
+       u16 x, y;
+       u8 w;
+       u8 id;
+       int slot;
+       int i;
+
+       for (i = 0; i < n_fingers; i++) {
+               buf = &data[contact_offsets[i]];
+
+               /*
+                * Odd contacts have contact ID in the lower nibble of
+                * the preceding byte, whereas even contacts have it in
+                * the upper nibble of the following byte.
+                */
+               id = i % 2 ? buf[-1] & 0x0f : buf[5] >> 4;
+               slot = input_mt_get_slot_by_key(ts->input, id);
+               if (slot < 0)
+                       continue;
+
+               x = get_unaligned_be16(buf);
+               y = get_unaligned_be16(buf + 2);
+               w = buf[4];
+
+               dev_dbg(ts->dev, "finger %d: ID %02x (%d, %d) w: %d\n",
+                       slot, id, x, y, w);
+
+               input_mt_slot(ts->input, slot);
+               input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+               touchscreen_report_pos(ts->input, &ts->props, x, y, true);
+               input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, w);
+       }
+
+       input_mt_sync_frame(ts->input);
+       input_sync(ts->input);
+}
+
+static irqreturn_t cy8ctma140_irq_thread(int irq, void *d)
+{
+       struct cy8ctma140 *ts = d;
+       u8 cmdbuf[] = { CY8CTMA140_GET_FINGERS };
+       u8 buf[CY8CTMA140_PACKET_SIZE];
+       struct i2c_msg msg[] = {
+               {
+                       .addr = ts->client->addr,
+                       .flags = 0,
+                       .len = sizeof(cmdbuf),
+                       .buf = cmdbuf,
+               }, {
+                       .addr = ts->client->addr,
+                       .flags = I2C_M_RD,
+                       .len = sizeof(buf),
+                       .buf = buf,
+               },
+       };
+       u8 n_fingers;
+       int ret;
+
+       ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+       if (ret != ARRAY_SIZE(msg)) {
+               if (ret < 0)
+                       dev_err(ts->dev, "error reading message: %d\n", ret);
+               else
+                       dev_err(ts->dev, "wrong number of messages\n");
+               goto out;
+       }
+
+       if (buf[1] & BIT(CY8CTMA140_INVALID_BUFFER_BIT)) {
+               dev_dbg(ts->dev, "invalid event\n");
+               goto out;
+       }
+
+       n_fingers = buf[2] & 0x0f;
+       if (n_fingers > CY8CTMA140_MAX_FINGERS) {
+               dev_err(ts->dev, "unexpected number of fingers: %d\n",
+                       n_fingers);
+               goto out;
+       }
+
+       cy8ctma140_report(ts, buf, n_fingers);
+
+out:
+       return IRQ_HANDLED;
+}
+
+static int cy8ctma140_init(struct cy8ctma140 *ts)
+{
+       u8 addr[1];
+       u8 buf[5];
+       int ret;
+
+       addr[0] = CY8CTMA140_GET_FW_INFO;
+       ret = i2c_master_send(ts->client, addr, 1);
+       if (ret < 0) {
+               dev_err(ts->dev, "error sending FW info message\n");
+               return ret;
+       }
+       ret = i2c_master_recv(ts->client, buf, 5);
+       if (ret < 0) {
+               dev_err(ts->dev, "error receiving FW info message\n");
+               return ret;
+       }
+       if (ret != 5) {
+               dev_err(ts->dev, "got only %d bytes\n", ret);
+               return -EIO;
+       }
+
+       dev_dbg(ts->dev, "vendor %c%c, HW ID %.2d, FW ver %.4d\n",
+               buf[0], buf[1], buf[3], buf[4]);
+
+       return 0;
+}
+
+static int cy8ctma140_power_up(struct cy8ctma140 *ts)
+{
+       int error;
+
+       error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
+                                     ts->regulators);
+       if (error) {
+               dev_err(ts->dev, "failed to enable regulators\n");
+               return error;
+       }
+
+       msleep(250);
+
+       return 0;
+}
+
+static void cy8ctma140_power_down(struct cy8ctma140 *ts)
+{
+       regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
+                              ts->regulators);
+}
+
+/* Called from the registered devm action */
+static void cy8ctma140_power_off_action(void *d)
+{
+       struct cy8ctma140 *ts = d;
+
+       cy8ctma140_power_down(ts);
+}
+
+static int cy8ctma140_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct cy8ctma140 *ts;
+       struct input_dev *input;
+       struct device *dev = &client->dev;
+       int error;
+
+       ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       input = devm_input_allocate_device(dev);
+       if (!input)
+               return -ENOMEM;
+
+       ts->dev = dev;
+       ts->client = client;
+       ts->input = input;
+
+       input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+       /* One byte for width 0..255 so this is the limit */
+       input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+       /*
+        * This sets up event max/min capabilities and fuzz.
+        * Some DT properties are compulsory so we do not need
+        * to provide defaults for X/Y max or pressure max.
+        *
+        * We just initialize a very simple MT touchscreen here,
+        * some devices use the capability of this touchscreen to
+        * provide touchkeys, and in that case this needs to be
+        * extended to handle touchkey input.
+        *
+        * The firmware takes care of finger tracking and dropping
+        * invalid ranges.
+        */
+       touchscreen_parse_properties(input, true, &ts->props);
+       input_abs_set_fuzz(input, ABS_MT_POSITION_X, 0);
+       input_abs_set_fuzz(input, ABS_MT_POSITION_Y, 0);
+
+       error = input_mt_init_slots(input, CY8CTMA140_MAX_FINGERS,
+                                 INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+       if (error)
+               return error;
+
+       input->name = CY8CTMA140_NAME;
+       input->id.bustype = BUS_I2C;
+       input_set_drvdata(input, ts);
+
+       /*
+        * VCPIN is the analog voltage supply
+        * VDD is the digital voltage supply
+        * since the voltage range of VDD overlaps that of VCPIN,
+        * many designs to just supply both with a single voltage
+        * source of ~3.3 V.
+        */
+       ts->regulators[0].supply = "vcpin";
+       ts->regulators[1].supply = "vdd";
+       error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
+                                     ts->regulators);
+       if (error) {
+               if (error != -EPROBE_DEFER)
+                       dev_err(dev, "Failed to get regulators %d\n",
+                               error);
+               return error;
+       }
+
+       error = cy8ctma140_power_up(ts);
+       if (error)
+               return error;
+
+       error = devm_add_action_or_reset(dev, cy8ctma140_power_off_action, ts);
+       if (error) {
+               dev_err(dev, "failed to install power off handler\n");
+               return error;
+       }
+
+       error = devm_request_threaded_irq(dev, client->irq,
+                                         NULL, cy8ctma140_irq_thread,
+                                         IRQF_ONESHOT, CY8CTMA140_NAME, ts);
+       if (error) {
+               dev_err(dev, "irq %d busy? error %d\n", client->irq, error);
+               return error;
+       }
+
+       error = cy8ctma140_init(ts);
+       if (error)
+               return error;
+
+       error = input_register_device(input);
+       if (error)
+               return error;
+
+       i2c_set_clientdata(client, ts);
+
+       return 0;
+}
+
+static int __maybe_unused cy8ctma140_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct cy8ctma140 *ts = i2c_get_clientdata(client);
+
+       if (!device_may_wakeup(&client->dev))
+               cy8ctma140_power_down(ts);
+
+       return 0;
+}
+
+static int __maybe_unused cy8ctma140_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct cy8ctma140 *ts = i2c_get_clientdata(client);
+       int error;
+
+       if (!device_may_wakeup(&client->dev)) {
+               error = cy8ctma140_power_up(ts);
+               if (error)
+                       return error;
+       }
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cy8ctma140_pm, cy8ctma140_suspend, cy8ctma140_resume);
+
+static const struct i2c_device_id cy8ctma140_idtable[] = {
+       { CY8CTMA140_NAME, 0 },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, cy8ctma140_idtable);
+
+static const struct of_device_id cy8ctma140_of_match[] = {
+       { .compatible = "cypress,cy8ctma140", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cy8ctma140_of_match);
+
+static struct i2c_driver cy8ctma140_driver = {
+       .driver         = {
+               .name   = CY8CTMA140_NAME,
+               .pm     = &cy8ctma140_pm,
+               .of_match_table = cy8ctma140_of_match,
+       },
+       .id_table       = cy8ctma140_idtable,
+       .probe          = cy8ctma140_probe,
+};
+module_i2c_driver(cy8ctma140_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("CY8CTMA140 TouchScreen Driver");
+MODULE_LICENSE("GPL v2");
index 6bcffc9..02a73d9 100644 (file)
@@ -744,8 +744,7 @@ static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
 
        for (t = 0; t < max_slots; t++) {
                input_mt_slot(md->input, t);
-               input_mt_report_slot_state(md->input,
-                       MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(md->input);
        }
 }
 
@@ -845,7 +844,7 @@ static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids)
                if (ids[t])
                        continue;
                input_mt_slot(input, t);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(input);
        }
 
        input_sync(input);
index 3f5d463..697aa2c 100644 (file)
@@ -340,7 +340,7 @@ static void cyttsp_report_tchdata(struct cyttsp *ts)
                        continue;
 
                input_mt_slot(input, i);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(input);
        }
 
        input_sync(input);
index d258772..3a4f18d 100644 (file)
@@ -38,6 +38,9 @@
 #define WORK_REGISTER_NUM_X            0x33
 #define WORK_REGISTER_NUM_Y            0x34
 
+#define PMOD_REGISTER_ACTIVE           0x00
+#define PMOD_REGISTER_HIBERNATE                0x03
+
 #define M09_REGISTER_THRESHOLD         0x80
 #define M09_REGISTER_GAIN              0x92
 #define M09_REGISTER_OFFSET            0x93
@@ -53,6 +56,7 @@
 
 #define WORK_REGISTER_OPMODE           0x3c
 #define FACTORY_REGISTER_OPMODE                0x01
+#define PMOD_REGISTER_OPMODE           0xa5
 
 #define TOUCH_EVENT_DOWN               0x00
 #define TOUCH_EVENT_UP                 0x01
 #define EDT_RAW_DATA_RETRIES           100
 #define EDT_RAW_DATA_DELAY             1000 /* usec */
 
+enum edt_pmode {
+       EDT_PMODE_NOT_SUPPORTED,
+       EDT_PMODE_HIBERNATE,
+       EDT_PMODE_POWEROFF,
+};
+
 enum edt_ver {
        EDT_M06,
        EDT_M09,
@@ -103,6 +113,7 @@ struct edt_ft5x06_ts_data {
 
        struct mutex mutex;
        bool factory_mode;
+       enum edt_pmode suspend_mode;
        int threshold;
        int gain;
        int offset;
@@ -527,6 +538,29 @@ static const struct attribute_group edt_ft5x06_attr_group = {
        .attrs = edt_ft5x06_attrs,
 };
 
+static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
+{
+       struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
+       edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
+                                 tsdata->threshold);
+       edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
+                                 tsdata->gain);
+       if (reg_addr->reg_offset != NO_REGISTER)
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
+                                         tsdata->offset);
+       if (reg_addr->reg_offset_x != NO_REGISTER)
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
+                                         tsdata->offset_x);
+       if (reg_addr->reg_offset_y != NO_REGISTER)
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
+                                         tsdata->offset_y);
+       if (reg_addr->reg_report_rate != NO_REGISTER)
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
+                                 tsdata->report_rate);
+
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
 {
@@ -592,7 +626,6 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
 {
        struct i2c_client *client = tsdata->client;
        int retries = EDT_SWITCH_MODE_RETRIES;
-       struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
        int ret;
        int error;
 
@@ -624,24 +657,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
        kfree(tsdata->raw_buffer);
        tsdata->raw_buffer = NULL;
 
-       /* restore parameters */
-       edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
-                                 tsdata->threshold);
-       edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
-                                 tsdata->gain);
-       if (reg_addr->reg_offset != NO_REGISTER)
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
-                                         tsdata->offset);
-       if (reg_addr->reg_offset_x != NO_REGISTER)
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
-                                         tsdata->offset_x);
-       if (reg_addr->reg_offset_y != NO_REGISTER)
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
-                                         tsdata->offset_y);
-       if (reg_addr->reg_report_rate != NO_REGISTER)
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
-                                 tsdata->report_rate);
-
+       edt_ft5x06_restore_reg_parameters(tsdata);
        enable_irq(client->irq);
 
        return 0;
@@ -762,9 +778,8 @@ static const struct file_operations debugfs_raw_data_fops = {
        .read = edt_ft5x06_debugfs_raw_data_read,
 };
 
-static void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
-                             const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+                                         const char *debugfs_name)
 {
        tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
 
@@ -777,8 +792,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
                            tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
 }
 
-static void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
 {
        debugfs_remove_recursive(tsdata->debug_dir);
        kfree(tsdata->raw_buffer);
@@ -786,14 +800,17 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
 
 #else
 
-static inline void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
-                             const char *debugfs_name)
+static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+{
+       return -ENOSYS;
+}
+
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+                                         const char *debugfs_name)
 {
 }
 
-static inline void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
 {
 }
 
@@ -938,19 +955,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
 
        error = device_property_read_u32(dev, "offset", &val);
        if (!error) {
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+               if (reg_addr->reg_offset != NO_REGISTER)
+                       edt_ft5x06_register_write(tsdata,
+                                                 reg_addr->reg_offset, val);
                tsdata->offset = val;
        }
 
        error = device_property_read_u32(dev, "offset-x", &val);
        if (!error) {
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val);
+               if (reg_addr->reg_offset_x != NO_REGISTER)
+                       edt_ft5x06_register_write(tsdata,
+                                                 reg_addr->reg_offset_x, val);
                tsdata->offset_x = val;
        }
 
        error = device_property_read_u32(dev, "offset-y", &val);
        if (!error) {
-               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val);
+               if (reg_addr->reg_offset_y != NO_REGISTER)
+                       edt_ft5x06_register_write(tsdata,
+                                                 reg_addr->reg_offset_y, val);
                tsdata->offset_y = val;
        }
 }
@@ -1114,6 +1137,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
                return error;
        }
 
+       /*
+        * Check which sleep modes we can support. Power-off requires the
+        * reset-pin to ensure correct power-down/power-up behaviour. Start with
+        * the EDT_PMODE_POWEROFF test since this is the deepest possible sleep
+        * mode.
+        */
+       if (tsdata->reset_gpio)
+               tsdata->suspend_mode = EDT_PMODE_POWEROFF;
+       else if (tsdata->wake_gpio)
+               tsdata->suspend_mode = EDT_PMODE_HIBERNATE;
+       else
+               tsdata->suspend_mode = EDT_PMODE_NOT_SUPPORTED;
+
        if (tsdata->wake_gpio) {
                usleep_range(5000, 6000);
                gpiod_set_value_cansleep(tsdata->wake_gpio, 1);
@@ -1227,6 +1263,102 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
        return 0;
 }
 
+static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+       struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+       int ret;
+
+       if (device_may_wakeup(dev))
+               return 0;
+
+       if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+               return 0;
+
+       /* Enter hibernate mode. */
+       ret = edt_ft5x06_register_write(tsdata, PMOD_REGISTER_OPMODE,
+                                       PMOD_REGISTER_HIBERNATE);
+       if (ret)
+               dev_warn(dev, "Failed to set hibernate mode\n");
+
+       if (tsdata->suspend_mode == EDT_PMODE_HIBERNATE)
+               return 0;
+
+       /*
+        * Power off according to the datasheet. Cutting the power may leave
+        * the irq line in an undefined state depending on the host pull
+        * resistor settings. Disable the irq to avoid having to adjust each
+        * host until the device is back in a fully functional state.
+        */
+       disable_irq(tsdata->client->irq);
+
+       gpiod_set_value_cansleep(reset_gpio, 1);
+       usleep_range(1000, 2000);
+
+       ret = regulator_disable(tsdata->vcc);
+       if (ret)
+               dev_warn(dev, "Failed to disable vcc\n");
+
+       return 0;
+}
+
+static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+       int ret = 0;
+
+       if (device_may_wakeup(dev))
+               return 0;
+
+       if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+               return 0;
+
+       if (tsdata->suspend_mode == EDT_PMODE_POWEROFF) {
+               struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+
+               /*
+                * We can't check if the regulator is a dummy or a real
+                * regulator. So we need to specify the 5ms reset time (T_rst)
+                * here instead of the 100us T_rtp time. We also need to wait
+                * 300ms in case it was a real supply and the power was cut
+                * off. Toggling the reset pin is also a way to exit the
+                * hibernate mode.
+                */
+               gpiod_set_value_cansleep(reset_gpio, 1);
+               usleep_range(5000, 6000);
+
+               ret = regulator_enable(tsdata->vcc);
+               if (ret) {
+                       dev_err(dev, "Failed to enable vcc\n");
+                       return ret;
+               }
+
+               usleep_range(1000, 2000);
+               gpiod_set_value_cansleep(reset_gpio, 0);
+               msleep(300);
+
+               edt_ft5x06_restore_reg_parameters(tsdata);
+               enable_irq(tsdata->client->irq);
+
+               if (tsdata->factory_mode)
+                       ret = edt_ft5x06_factory_mode(tsdata);
+       } else {
+               struct gpio_desc *wake_gpio = tsdata->wake_gpio;
+
+               gpiod_set_value_cansleep(wake_gpio, 0);
+               usleep_range(5000, 6000);
+               gpiod_set_value_cansleep(wake_gpio, 1);
+       }
+
+
+       return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
+                        edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
+
 static const struct edt_i2c_chip_data edt_ft5x06_data = {
        .max_support_points = 5,
 };
@@ -1265,6 +1397,8 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
        .driver = {
                .name = "edt_ft5x06",
                .of_match_table = edt_ft5x06_of_match,
+               .pm = &edt_ft5x06_ts_pm_ops,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
        .id_table = edt_ft5x06_ts_id,
        .probe    = edt_ft5x06_ts_probe,
index 2289f96..233cb10 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/gpio/consumer.h>
@@ -89,6 +90,7 @@
 /* FW read command, 0x53 0x?? 0x0, 0x01 */
 #define E_ELAN_INFO_FW_VER     0x00
 #define E_ELAN_INFO_BC_VER     0x10
+#define E_ELAN_INFO_REK                0xE0
 #define E_ELAN_INFO_TEST_VER   0xE0
 #define E_ELAN_INFO_FW_ID      0xF0
 #define E_INFO_OSR             0xD6
@@ -136,6 +138,7 @@ struct elants_data {
        unsigned int y_res;
        unsigned int x_max;
        unsigned int y_max;
+       struct touchscreen_properties prop;
 
        enum elants_state state;
        enum elants_iap_mode iap_mode;
@@ -189,7 +192,8 @@ static int elants_i2c_read(struct i2c_client *client, void *data, size_t size)
 
 static int elants_i2c_execute_command(struct i2c_client *client,
                                      const u8 *cmd, size_t cmd_size,
-                                     u8 *resp, size_t resp_size)
+                                     u8 *resp, size_t resp_size,
+                                     int retries, const char *cmd_name)
 {
        struct i2c_msg msgs[2];
        int ret;
@@ -209,30 +213,55 @@ static int elants_i2c_execute_command(struct i2c_client *client,
                break;
 
        default:
-               dev_err(&client->dev, "%s: invalid command %*ph\n",
-                       __func__, (int)cmd_size, cmd);
+               dev_err(&client->dev, "(%s): invalid command: %*ph\n",
+                       cmd_name, (int)cmd_size, cmd);
                return -EINVAL;
        }
 
-       msgs[0].addr = client->addr;
-       msgs[0].flags = client->flags & I2C_M_TEN;
-       msgs[0].len = cmd_size;
-       msgs[0].buf = (u8 *)cmd;
+       for (;;) {
+               msgs[0].addr = client->addr;
+               msgs[0].flags = client->flags & I2C_M_TEN;
+               msgs[0].len = cmd_size;
+               msgs[0].buf = (u8 *)cmd;
+
+               msgs[1].addr = client->addr;
+               msgs[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
+               msgs[1].flags |= I2C_M_RD;
+               msgs[1].len = resp_size;
+               msgs[1].buf = resp;
+
+               ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+               if (ret < 0) {
+                       if (--retries > 0) {
+                               dev_dbg(&client->dev,
+                                       "(%s) I2C transfer failed: %pe (retrying)\n",
+                                       cmd_name, ERR_PTR(ret));
+                               continue;
+                       }
 
-       msgs[1].addr = client->addr;
-       msgs[1].flags = client->flags & I2C_M_TEN;
-       msgs[1].flags |= I2C_M_RD;
-       msgs[1].len = resp_size;
-       msgs[1].buf = resp;
+                       dev_err(&client->dev,
+                               "(%s) I2C transfer failed: %pe\n",
+                               cmd_name, ERR_PTR(ret));
+                       return ret;
+               }
 
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               return ret;
+               if (ret != ARRAY_SIZE(msgs) ||
+                   resp[FW_HDR_TYPE] != expected_response) {
+                       if (--retries > 0) {
+                               dev_dbg(&client->dev,
+                                       "(%s) unexpected response: %*ph (retrying)\n",
+                                       cmd_name, ret, resp);
+                               continue;
+                       }
 
-       if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response)
-               return -EIO;
+                       dev_err(&client->dev,
+                               "(%s) unexpected response: %*ph\n",
+                               cmd_name, ret, resp);
+                       return -EIO;
+               }
 
-       return 0;
+               return 0;
+       }
 }
 
 static int elants_i2c_calibrate(struct elants_data *ts)
@@ -305,27 +334,21 @@ static u16 elants_i2c_parse_version(u8 *buf)
 static int elants_i2c_query_hw_version(struct elants_data *ts)
 {
        struct i2c_client *client = ts->client;
-       int error, retry_cnt;
+       int retry_cnt = MAX_RETRIES;
        const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 };
        u8 resp[HEADER_SIZE];
+       int error;
 
-       for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+       while (retry_cnt--) {
                error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
-                                                  resp, sizeof(resp));
-               if (!error) {
-                       ts->hw_version = elants_i2c_parse_version(resp);
-                       if (ts->hw_version != 0xffff)
-                               return 0;
-               }
-
-               dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n",
-                       error, (int)sizeof(resp), resp);
-       }
+                                                  resp, sizeof(resp), 1,
+                                                  "read fw id");
+               if (error)
+                       return error;
 
-       if (error) {
-               dev_err(&client->dev,
-                       "Failed to read fw id: %d\n", error);
-               return error;
+               ts->hw_version = elants_i2c_parse_version(resp);
+               if (ts->hw_version != 0xffff)
+                       return 0;
        }
 
        dev_err(&client->dev, "Invalid fw id: %#04x\n", ts->hw_version);
@@ -336,26 +359,27 @@ static int elants_i2c_query_hw_version(struct elants_data *ts)
 static int elants_i2c_query_fw_version(struct elants_data *ts)
 {
        struct i2c_client *client = ts->client;
-       int error, retry_cnt;
+       int retry_cnt = MAX_RETRIES;
        const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 };
        u8 resp[HEADER_SIZE];
+       int error;
 
-       for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+       while (retry_cnt--) {
                error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
-                                                  resp, sizeof(resp));
-               if (!error) {
-                       ts->fw_version = elants_i2c_parse_version(resp);
-                       if (ts->fw_version != 0x0000 &&
-                           ts->fw_version != 0xffff)
-                               return 0;
-               }
+                                                  resp, sizeof(resp), 1,
+                                                  "read fw version");
+               if (error)
+                       return error;
+
+               ts->fw_version = elants_i2c_parse_version(resp);
+               if (ts->fw_version != 0x0000 && ts->fw_version != 0xffff)
+                       return 0;
 
-               dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n",
-                       error, (int)sizeof(resp), resp);
+               dev_dbg(&client->dev, "(read fw version) resp %*phC\n",
+                       (int)sizeof(resp), resp);
        }
 
-       dev_err(&client->dev,
-               "Failed to read fw version or fw version is invalid\n");
+       dev_err(&client->dev, "Invalid fw ver: %#04x\n", ts->fw_version);
 
        return -EINVAL;
 }
@@ -363,30 +387,24 @@ static int elants_i2c_query_fw_version(struct elants_data *ts)
 static int elants_i2c_query_test_version(struct elants_data *ts)
 {
        struct i2c_client *client = ts->client;
-       int error, retry_cnt;
+       int error;
        u16 version;
        const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 };
        u8 resp[HEADER_SIZE];
 
-       for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-               error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
-                                                  resp, sizeof(resp));
-               if (!error) {
-                       version = elants_i2c_parse_version(resp);
-                       ts->test_version = version >> 8;
-                       ts->solution_version = version & 0xff;
-
-                       return 0;
-               }
-
-               dev_dbg(&client->dev,
-                       "read test version error rc=%d, buf=%*phC\n",
-                       error, (int)sizeof(resp), resp);
+       error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+                                          resp, sizeof(resp), MAX_RETRIES,
+                                          "read test version");
+       if (error) {
+               dev_err(&client->dev, "Failed to read test version\n");
+               return error;
        }
 
-       dev_err(&client->dev, "Failed to read test version\n");
+       version = elants_i2c_parse_version(resp);
+       ts->test_version = version >> 8;
+       ts->solution_version = version & 0xff;
 
-       return -EINVAL;
+       return 0;
 }
 
 static int elants_i2c_query_bc_version(struct elants_data *ts)
@@ -398,13 +416,10 @@ static int elants_i2c_query_bc_version(struct elants_data *ts)
        int error;
 
        error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
-                                          resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev,
-                       "read BC version error=%d, buf=%*phC\n",
-                       error, (int)sizeof(resp), resp);
+                                          resp, sizeof(resp), 1,
+                                          "read BC version");
+       if (error)
                return error;
-       }
 
        version = elants_i2c_parse_version(resp);
        ts->bc_version = version >> 8;
@@ -436,12 +451,10 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
        error = elants_i2c_execute_command(client,
                                           get_resolution_cmd,
                                           sizeof(get_resolution_cmd),
-                                          resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev, "get resolution command failed: %d\n",
-                       error);
+                                          resp, sizeof(resp), 1,
+                                          "get resolution");
+       if (error)
                return error;
-       }
 
        rows = resp[2] + resp[6] + resp[10];
        cols = resp[3] + resp[7] + resp[11];
@@ -449,36 +462,29 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
        /* Process mm_to_pixel information */
        error = elants_i2c_execute_command(client,
                                           get_osr_cmd, sizeof(get_osr_cmd),
-                                          resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev, "get osr command failed: %d\n",
-                       error);
+                                          resp, sizeof(resp), 1, "get osr");
+       if (error)
                return error;
-       }
 
        osr = resp[3];
 
        error = elants_i2c_execute_command(client,
                                           get_physical_scan_cmd,
                                           sizeof(get_physical_scan_cmd),
-                                          resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev, "get physical scan command failed: %d\n",
-                       error);
+                                          resp, sizeof(resp), 1,
+                                          "get physical scan");
+       if (error)
                return error;
-       }
 
        phy_x = get_unaligned_be16(&resp[2]);
 
        error = elants_i2c_execute_command(client,
                                           get_physical_drive_cmd,
                                           sizeof(get_physical_drive_cmd),
-                                          resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev, "get physical drive command failed: %d\n",
-                       error);
+                                          resp, sizeof(resp), 1,
+                                          "get physical drive");
+       if (error)
                return error;
-       }
 
        phy_y = get_unaligned_be16(&resp[2]);
 
@@ -633,11 +639,10 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
 
        /* Compare TS Remark ID and FW Remark ID */
        error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
-                                       resp, sizeof(resp));
-       if (error) {
-               dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+                                          resp, sizeof(resp),
+                                          1, "read Remark ID");
+       if (error)
                return error;
-       }
 
        ts_remark_id = get_unaligned_be16(&resp[3]);
 
@@ -875,8 +880,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
 
                        input_mt_slot(input, i);
                        input_mt_report_slot_state(input, tool_type, true);
-                       input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
-                       input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+                       touchscreen_report_pos(input, &ts->prop, x, y, true);
                        input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
                        input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w);
 
@@ -1017,7 +1021,7 @@ out:
  */
 static ssize_t calibrate_store(struct device *dev,
                               struct device_attribute *attr,
-                             const char *buf, size_t count)
+                              const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct elants_data *ts = i2c_get_clientdata(client);
@@ -1063,8 +1067,28 @@ static ssize_t show_iap_mode(struct device *dev,
                                "Normal" : "Recovery");
 }
 
+static ssize_t show_calibration_count(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_REK, 0x00, 0x01 };
+       u8 resp[HEADER_SIZE];
+       u16 rek_count;
+       int error;
+
+       error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+                                          resp, sizeof(resp), 1,
+                                          "read ReK status");
+       if (error)
+               return sprintf(buf, "%d\n", error);
+
+       rek_count = get_unaligned_be16(&resp[2]);
+       return sprintf(buf, "0x%04x\n", rek_count);
+}
+
 static DEVICE_ATTR_WO(calibrate);
 static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
+static DEVICE_ATTR(calibration_count, S_IRUGO, show_calibration_count, NULL);
 static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
 
 struct elants_version_attribute {
@@ -1120,6 +1144,7 @@ static struct attribute *elants_attributes[] = {
        &dev_attr_calibrate.attr,
        &dev_attr_update_fw.attr,
        &dev_attr_iap_mode.attr,
+       &dev_attr_calibration_count.attr,
 
        &elants_ver_attr_fw_version.dattr.attr,
        &elants_ver_attr_hw_version.dattr.attr,
@@ -1290,25 +1315,7 @@ static int elants_i2c_probe(struct i2c_client *client,
        ts->input->name = "Elan Touchscreen";
        ts->input->id.bustype = BUS_I2C;
 
-       __set_bit(BTN_TOUCH, ts->input->keybit);
-       __set_bit(EV_ABS, ts->input->evbit);
-       __set_bit(EV_KEY, ts->input->evbit);
-
-       /* Single touch input params setup */
-       input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0);
-       input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0);
-       input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
-       input_abs_set_res(ts->input, ABS_X, ts->x_res);
-       input_abs_set_res(ts->input, ABS_Y, ts->y_res);
-
        /* Multitouch input params setup */
-       error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
-                                   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
-       if (error) {
-               dev_err(&client->dev,
-                       "failed to initialize MT slots: %d\n", error);
-               return error;
-       }
 
        input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
        input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
@@ -1320,6 +1327,16 @@ static int elants_i2c_probe(struct i2c_client *client,
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
        input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
 
+       touchscreen_parse_properties(ts->input, true, &ts->prop);
+
+       error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
+                                   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+       if (error) {
+               dev_err(&client->dev,
+                       "failed to initialize MT slots: %d\n", error);
+               return error;
+       }
+
        error = input_register_device(ts->input);
        if (error) {
                dev_err(&client->dev,
index 247c3aa..f67efdd 100644 (file)
@@ -391,7 +391,7 @@ static void mip4_clear_input(struct mip4_ts *ts)
        /* Screen */
        for (i = 0; i < MIP4_MAX_FINGERS; i++) {
                input_mt_slot(ts->input, i);
-               input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+               input_mt_report_slot_inactive(ts->input);
        }
 
        /* Keys */
@@ -534,7 +534,7 @@ static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
        } else {
                /* Release event */
                input_mt_slot(ts->input, id);
-               input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+               input_mt_report_slot_inactive(ts->input);
        }
 
        input_mt_sync_frame(ts->input);
index 2ef1ada..1f96657 100644 (file)
@@ -54,6 +54,7 @@
 enum mms_type {
        TYPE_MMS114     = 114,
        TYPE_MMS152     = 152,
+       TYPE_MMS345L    = 345,
 };
 
 struct mms114_data {
@@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data)
        int error;
 
        switch (data->type) {
+       case TYPE_MMS345L:
+               error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
+               if (error)
+                       return error;
+
+               dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n",
+                        buf[0], buf[1], buf[2]);
+               break;
+
        case TYPE_MMS152:
                error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
                if (error)
@@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data)
        if (error < 0)
                return error;
 
-       /* MMS152 has no configuration or power on registers */
-       if (data->type == TYPE_MMS152)
+       /* Only MMS114 has configuration and power on registers */
+       if (data->type != TYPE_MMS114)
                return 0;
 
        error = mms114_set_active(data, true);
@@ -547,7 +557,7 @@ static int __maybe_unused mms114_suspend(struct device *dev)
        /* Release all touch */
        for (id = 0; id < MMS114_MAX_TOUCH; id++) {
                input_mt_slot(input_dev, id);
-               input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, false);
+               input_mt_report_slot_inactive(input_dev);
        }
 
        input_mt_report_pointer_emulation(input_dev, true);
@@ -597,6 +607,9 @@ static const struct of_device_id mms114_dt_match[] = {
        }, {
                .compatible = "melfas,mms152",
                .data = (void *)TYPE_MMS152,
+       }, {
+               .compatible = "melfas,mms345l",
+               .data = (void *)TYPE_MMS345L,
        },
        { }
 };
index 0e2e08f..ef6aaed 100644 (file)
@@ -100,7 +100,7 @@ static void rpi_ts_poll(struct input_dev *input)
        released_ids = ts->known_ids & ~modified_ids;
        for_each_set_bit(i, &released_ids, RPI_TS_MAX_SUPPORTED_POINTS) {
                input_mt_slot(input, i);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER, 0);
+               input_mt_report_slot_inactive(input);
                modified_ids &= ~(BIT(i));
        }
        ts->known_ids = modified_ids;
index b6f95f2..b54cc64 100644 (file)
@@ -198,7 +198,7 @@ static void stmfts_report_contact_release(struct stmfts_data *sdata,
        u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4;
 
        input_mt_slot(sdata->input, slot_id);
-       input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+       input_mt_report_slot_inactive(sdata->input);
 
        input_sync(sdata->input);
 }
index ece2a57..e5f9987 100644 (file)
@@ -543,6 +543,24 @@ void icc_set_tag(struct icc_path *path, u32 tag)
 }
 EXPORT_SYMBOL_GPL(icc_set_tag);
 
+/**
+ * icc_get_name() - Get name of the icc path
+ * @path: reference to the path returned by icc_get()
+ *
+ * This function is used by an interconnect consumer to get the name of the icc
+ * path.
+ *
+ * Returns a valid pointer on success, or NULL otherwise.
+ */
+const char *icc_get_name(struct icc_path *path)
+{
+       if (!path)
+               return NULL;
+
+       return path->name;
+}
+EXPORT_SYMBOL_GPL(icc_get_name);
+
 /**
  * icc_set_bw() - set bandwidth constraints on an interconnect path
  * @path: reference to the path returned by icc_get()
index 66b9a68..29fead2 100644 (file)
@@ -493,6 +493,19 @@ config TI_SCI_INTA_IRQCHIP
          If you wish to use interrupt aggregator irq resources managed by the
          TI System Controller, say Y here. Otherwise, say N.
 
+config RISCV_INTC
+       bool "RISC-V Local Interrupt Controller"
+       depends on RISCV
+       default y
+       help
+          This enables support for the per-HART local interrupt controller
+          found in standard RISC-V systems.  The per-HART local interrupt
+          controller handles timer interrupts, software interrupts, and
+          hardware interrupts. Without a per-HART local interrupt controller,
+          a RISC-V system will be unable to handle any interrupts.
+
+          If you don't know what to do here, say Y.
+
 config SIFIVE_PLIC
        bool "SiFive Platform-Level Interrupt Controller"
        depends on RISCV
index 3a4ce28..133f9c4 100644 (file)
@@ -98,6 +98,7 @@ obj-$(CONFIG_NDS32)                   += irq-ativic32.o
 obj-$(CONFIG_QCOM_PDC)                 += qcom-pdc.o
 obj-$(CONFIG_CSKY_MPINTC)              += irq-csky-mpintc.o
 obj-$(CONFIG_CSKY_APB_INTC)            += irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC)               += irq-riscv-intc.o
 obj-$(CONFIG_SIFIVE_PLIC)              += irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)             += irq-imx-irqsteer.o
 obj-$(CONFIG_IMX_INTMUX)               += irq-imx-intmux.o
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
new file mode 100644 (file)
index 0000000..a6f97fa
--- /dev/null
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#define pr_fmt(fmt) "riscv-intc: " fmt
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+static struct irq_domain *intc_domain;
+
+static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+{
+       unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+       if (unlikely(cause >= BITS_PER_LONG))
+               panic("unexpected interrupt cause");
+
+       switch (cause) {
+#ifdef CONFIG_SMP
+       case RV_IRQ_SOFT:
+               /*
+                * We only use software interrupts to pass IPIs, so if a
+                * non-SMP system gets one, then we don't know what to do.
+                */
+               handle_IPI(regs);
+               break;
+#endif
+       default:
+               handle_domain_irq(intc_domain, cause, regs);
+               break;
+       }
+}
+
+/*
+ * On RISC-V systems local interrupts are masked or unmasked by writing
+ * the SIE (Supervisor Interrupt Enable) CSR.  As CSRs can only be written
+ * on the local hart, these functions can only be called on the hart that
+ * corresponds to the IRQ chip.
+ */
+
+static void riscv_intc_irq_mask(struct irq_data *d)
+{
+       csr_clear(CSR_IE, BIT(d->hwirq));
+}
+
+static void riscv_intc_irq_unmask(struct irq_data *d)
+{
+       csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static int riscv_intc_cpu_starting(unsigned int cpu)
+{
+       csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
+       return 0;
+}
+
+static int riscv_intc_cpu_dying(unsigned int cpu)
+{
+       csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
+       return 0;
+}
+
+static struct irq_chip riscv_intc_chip = {
+       .name = "RISC-V INTC",
+       .irq_mask = riscv_intc_irq_mask,
+       .irq_unmask = riscv_intc_irq_unmask,
+};
+
+static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+                                irq_hw_number_t hwirq)
+{
+       irq_set_percpu_devid(irq);
+       irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+                           handle_percpu_devid_irq, NULL, NULL);
+
+       return 0;
+}
+
+static const struct irq_domain_ops riscv_intc_domain_ops = {
+       .map    = riscv_intc_domain_map,
+       .xlate  = irq_domain_xlate_onecell,
+};
+
+static int __init riscv_intc_init(struct device_node *node,
+                                 struct device_node *parent)
+{
+       int rc, hartid;
+
+       hartid = riscv_of_parent_hartid(node);
+       if (hartid < 0) {
+               pr_warn("unable to fine hart id for %pOF\n", node);
+               return 0;
+       }
+
+       /*
+        * The DT will have one INTC DT node under each CPU (or HART)
+        * DT node so riscv_intc_init() function will be called once
+        * for each INTC DT node. We only need to do INTC initialization
+        * for the INTC DT node belonging to boot CPU (or boot HART).
+        */
+       if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+               return 0;
+
+       intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+                                           &riscv_intc_domain_ops, NULL);
+       if (!intc_domain) {
+               pr_err("unable to add IRQ domain\n");
+               return -ENXIO;
+       }
+
+       rc = set_handle_irq(&riscv_intc_irq);
+       if (rc) {
+               pr_err("failed to set irq handler\n");
+               return rc;
+       }
+
+       cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
+                         "irqchip/riscv/intc:starting",
+                         riscv_intc_cpu_starting,
+                         riscv_intc_cpu_dying);
+
+       pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+
+       return 0;
+}
+
+IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
index d9c53f8..eaa3e9f 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -76,6 +77,7 @@ struct plic_handler {
        void __iomem            *enable_base;
        struct plic_priv        *priv;
 };
+static int plic_parent_irq;
 static bool plic_cpuhp_setup_done;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
@@ -219,15 +221,17 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
  * that source ID back to the same claim register.  This automatically enables
  * and disables the interrupt, so there's nothing else to do.
  */
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
 {
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
        void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
        irq_hw_number_t hwirq;
 
        WARN_ON_ONCE(!handler->present);
 
-       csr_clear(CSR_IE, IE_EIE);
+       chained_irq_enter(chip, desc);
+
        while ((hwirq = readl(claim))) {
                int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
 
@@ -237,21 +241,8 @@ static void plic_handle_irq(struct pt_regs *regs)
                else
                        generic_handle_irq(irq);
        }
-       csr_set(CSR_IE, IE_EIE);
-}
-
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
-{
-       for (; node; node = node->parent) {
-               if (of_device_is_compatible(node, "riscv"))
-                       return riscv_of_processor_hartid(node);
-       }
 
-       return -1;
+       chained_irq_exit(chip, desc);
 }
 
 static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
@@ -262,10 +253,8 @@ static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
 
 static int plic_dying_cpu(unsigned int cpu)
 {
-       struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
-       csr_clear(CSR_IE, IE_EIE);
-       plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+       if (plic_parent_irq)
+               disable_percpu_irq(plic_parent_irq);
 
        return 0;
 }
@@ -274,7 +263,11 @@ static int plic_starting_cpu(unsigned int cpu)
 {
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
 
-       csr_set(CSR_IE, IE_EIE);
+       if (plic_parent_irq)
+               enable_percpu_irq(plic_parent_irq,
+                                 irq_get_trigger_type(plic_parent_irq));
+       else
+               pr_warn("cpu%d: parent irq not available\n", cpu);
        plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
 
        return 0;
@@ -330,7 +323,7 @@ static int __init plic_init(struct device_node *node,
                if (parent.args[0] != RV_IRQ_EXT)
                        continue;
 
-               hartid = plic_find_hart_id(parent.np);
+               hartid = riscv_of_parent_hartid(parent.np);
                if (hartid < 0) {
                        pr_warn("failed to parse hart ID for context %d.\n", i);
                        continue;
@@ -342,6 +335,14 @@ static int __init plic_init(struct device_node *node,
                        continue;
                }
 
+               /* Find parent domain and register chained handler */
+               if (!plic_parent_irq && irq_find_host(parent.np)) {
+                       plic_parent_irq = irq_of_parse_and_map(node, i);
+                       if (plic_parent_irq)
+                               irq_set_chained_handler(plic_parent_irq,
+                                                       plic_handle_irq);
+               }
+
                /*
                 * When running in M-mode we need to ignore the S-mode handler.
                 * Here we assume it always comes later, but that might be a
@@ -382,7 +383,6 @@ done:
 
        pr_info("%pOFP: mapped %d interrupts with %d handlers for"
                " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
-       set_handle_irq(plic_handle_irq);
        return 0;
 
 out_iounmap:
index 5a577a6..05b1009 100644 (file)
@@ -236,4 +236,22 @@ config SUN6I_MSGBOX
          various Allwinner SoCs. This mailbox is used for communication
          between the application CPUs and the power management coprocessor.
 
+config SPRD_MBOX
+       tristate "Spreadtrum Mailbox"
+       depends on ARCH_SPRD || COMPILE_TEST
+       help
+         Mailbox driver implementation for the Spreadtrum platform. It is used
+         to send message between application processors and MCU. Say Y here if
+         you want to build the Spreatrum mailbox controller driver.
+
+config QCOM_IPCC
+       bool "Qualcomm Technologies, Inc. IPCC driver"
+       depends on ARCH_QCOM || COMPILE_TEST
+       help
+         Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+         (IPCC) driver for MSM devices. The driver provides mailbox support for
+         sending interrupts to the clients. On the other hand, the driver also
+         acts as an interrupt controller for receiving interrupts from clients.
+         Say Y here if you want to build this driver.
+
 endif
index 2e4364e..60d224b 100644 (file)
@@ -50,3 +50,7 @@ obj-$(CONFIG_MTK_CMDQ_MBOX)   += mtk-cmdq-mailbox.o
 obj-$(CONFIG_ZYNQMP_IPI_MBOX)  += zynqmp-ipi-mailbox.o
 
 obj-$(CONFIG_SUN6I_MSGBOX)     += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX)                += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC)                += qcom-ipcc.o
index 7906624..7205b82 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #define IMX_MU_xSR_GIPn(x)     BIT(28 + (3 - (x)))
@@ -66,6 +67,8 @@ struct imx_mu_priv {
        struct clk              *clk;
        int                     irq;
 
+       u32 xcr;
+
        bool                    side_b;
 };
 
@@ -154,12 +157,17 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
 
        switch (cp->type) {
        case IMX_MU_TYPE_TX:
-               if (msg->hdr.size > sizeof(*msg)) {
+               /*
+                * msg->hdr.size specifies the number of u32 words while
+                * sizeof yields bytes.
+                */
+
+               if (msg->hdr.size > sizeof(*msg) / 4) {
                        /*
                         * The real message size can be different to
                         * struct imx_sc_rpc_msg_max size
                         */
-                       dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+                       dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
                        return -EINVAL;
                }
 
@@ -198,9 +206,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
        imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
        *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
 
-       if (msg.hdr.size > sizeof(msg)) {
-               dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
-                       sizeof(msg), msg.hdr.size);
+       if (msg.hdr.size > sizeof(msg) / 4) {
+               dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
                return -EINVAL;
        }
 
@@ -285,8 +292,10 @@ static int imx_mu_startup(struct mbox_chan *chan)
 {
        struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
        struct imx_mu_con_priv *cp = chan->con_priv;
+       unsigned long irq_flag = IRQF_SHARED;
        int ret;
 
+       pm_runtime_get_sync(priv->dev);
        if (cp->type == IMX_MU_TYPE_TXDB) {
                /* Tx doorbell don't have ACK support */
                tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -294,8 +303,12 @@ static int imx_mu_startup(struct mbox_chan *chan)
                return 0;
        }
 
-       ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
-                         IRQF_NO_SUSPEND, cp->irq_desc, chan);
+       /* IPC MU should be with IRQF_NO_SUSPEND set */
+       if (!priv->dev->pm_domain)
+               irq_flag |= IRQF_NO_SUSPEND;
+
+       ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
+                         cp->irq_desc, chan);
        if (ret) {
                dev_err(priv->dev,
                        "Unable to acquire IRQ %d\n", priv->irq);
@@ -323,6 +336,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
 
        if (cp->type == IMX_MU_TYPE_TXDB) {
                tasklet_kill(&cp->txdb_tasklet);
+               pm_runtime_put_sync(priv->dev);
                return;
        }
 
@@ -341,6 +355,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
        }
 
        free_irq(priv->irq, chan);
+       pm_runtime_put_sync(priv->dev);
 }
 
 static const struct mbox_chan_ops imx_mu_ops = {
@@ -374,7 +389,7 @@ static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
                break;
        default:
                dev_err(mbox->dev, "Invalid chan type: %d\n", type);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        if (chan >= mbox->num_chans) {
@@ -508,14 +523,39 @@ static int imx_mu_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       return devm_mbox_controller_register(dev, &priv->mbox);
+       ret = devm_mbox_controller_register(dev, &priv->mbox);
+       if (ret) {
+               clk_disable_unprepare(priv->clk);
+               return ret;
+       }
+
+       pm_runtime_enable(dev);
+
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               pm_runtime_put_noidle(dev);
+               goto disable_runtime_pm;
+       }
+
+       ret = pm_runtime_put_sync(dev);
+       if (ret < 0)
+               goto disable_runtime_pm;
+
+       clk_disable_unprepare(priv->clk);
+
+       return 0;
+
+disable_runtime_pm:
+       pm_runtime_disable(dev);
+       clk_disable_unprepare(priv->clk);
+       return ret;
 }
 
 static int imx_mu_remove(struct platform_device *pdev)
 {
        struct imx_mu_priv *priv = platform_get_drvdata(pdev);
 
-       clk_disable_unprepare(priv->clk);
+       pm_runtime_disable(priv->dev);
 
        return 0;
 }
@@ -558,12 +598,69 @@ static const struct of_device_id imx_mu_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
 
+static int imx_mu_suspend_noirq(struct device *dev)
+{
+       struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+       if (!priv->clk)
+               priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
+       return 0;
+}
+
+static int imx_mu_resume_noirq(struct device *dev)
+{
+       struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+       /*
+        * ONLY restore MU when context lost, the TIE could
+        * be set during noirq resume as there is MU data
+        * communication going on, and restore the saved
+        * value will overwrite the TIE and cause MU data
+        * send failed, may lead to system freeze. This issue
+        * is observed by testing freeze mode suspend.
+        */
+       if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+               imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
+       return 0;
+}
+
+static int imx_mu_runtime_suspend(struct device *dev)
+{
+       struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(priv->clk);
+
+       return 0;
+}
+
+static int imx_mu_runtime_resume(struct device *dev)
+{
+       struct imx_mu_priv *priv = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               dev_err(dev, "failed to enable clock\n");
+
+       return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+                                     imx_mu_resume_noirq)
+       SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+                          imx_mu_runtime_resume, NULL)
+};
+
 static struct platform_driver imx_mu_driver = {
        .probe          = imx_mu_probe,
        .remove         = imx_mu_remove,
        .driver = {
                .name   = "imx_mu",
                .of_match_table = imx_mu_dt_ids,
+               .pm = &imx_mu_pm_ops,
        },
 };
 module_platform_driver(imx_mu_driver);
index 34844b7..8c7fac3 100644 (file)
@@ -568,7 +568,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
        return ret;
 }
 
-struct platform_driver pcc_mbox_driver = {
+static struct platform_driver pcc_mbox_driver = {
        .probe = pcc_mbox_probe,
        .driver = {
                .name = "PCCT",
index eeebafd..cec34f0 100644 (file)
@@ -24,6 +24,35 @@ struct qcom_apcs_ipc {
        struct platform_device *clk;
 };
 
+struct qcom_apcs_ipc_data {
+       int offset;
+       char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+       .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
+       .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+       .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+       .offset = 16, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
+       .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+       .offset = 12, .clk_name = NULL
+};
+
 static const struct regmap_config apcs_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
@@ -48,17 +77,12 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
 static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 {
        struct qcom_apcs_ipc *apcs;
+       const struct qcom_apcs_ipc_data *apcs_data;
        struct regmap *regmap;
        struct resource *res;
-       unsigned long offset;
        void __iomem *base;
        unsigned long i;
        int ret;
-       const struct of_device_id apcs_clk_match_table[] = {
-               { .compatible = "qcom,msm8916-apcs-kpss-global", },
-               { .compatible = "qcom,qcs404-apcs-apps-global", },
-               {}
-       };
 
        apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
        if (!apcs)
@@ -73,10 +97,10 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
-       offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+       apcs_data = of_device_get_match_data(&pdev->dev);
 
        apcs->regmap = regmap;
-       apcs->offset = offset;
+       apcs->offset = apcs_data->offset;
 
        /* Initialize channel identifiers */
        for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
@@ -93,9 +117,9 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+       if (apcs_data->clk_name) {
                apcs->clk = platform_device_register_data(&pdev->dev,
-                                                         "qcom-apcs-msm8916-clk",
+                                                         apcs_data->clk_name,
                                                          PLATFORM_DEVID_NONE,
                                                          NULL, 0);
                if (IS_ERR(apcs->clk))
@@ -119,14 +143,15 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
 
 /* .data is the offset of the ipc register within the global block */
 static const struct of_device_id qcom_apcs_ipc_of_match[] = {
-       { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
-       { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
-       { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
-       { .compatible = "qcom,qcs404-apcs-apps-global", .data = (void *)8 },
-       { .compatible = "qcom,sc7180-apss-shared", .data = (void *)12 },
-       { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
-       { .compatible = "qcom,sm8150-apss-shared", .data = (void *)12 },
-       { .compatible = "qcom,ipq8074-apcs-apps-global", .data = (void *)8 },
+       { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+       { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+       { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+       { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+       { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+       { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+       { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+       { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+       { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
        {}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644 (file)
index 0000000..2d13c72
--- /dev/null
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+#define IPCC_MBOX_MAX_CHAN             48
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID               0x0c
+#define IPCC_REG_RECV_ID               0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE    0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE   0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR     0x1c
+#define IPCC_REG_CLIENT_CLEAR          0x38
+
+#define IPCC_SIGNAL_ID_MASK            GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK            GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ            GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client-id to which the interrupt has to be triggered
+ * @signal_id: The signal-id to which the interrupt has to be triggered
+ */
+struct qcom_ipcc_chan_info {
+       u16 client_id;
+       u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev:               Device associated with this instance
+ * @base:              Base address of the IPCC frame associated to APSS
+ * @irq_domain:                The irq_domain associated with this instance
+ * @chan:              The mailbox channels array
+ * @mchan:             The per-mailbox channel info array
+ * @mbox:              The mailbox controller
+ * @irq:               Summary irq
+ */
+struct qcom_ipcc {
+       struct device *dev;
+       void __iomem *base;
+       struct irq_domain *irq_domain;
+       struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
+       struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+       struct mbox_controller mbox;
+       int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+       return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
+       return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+              FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+       struct qcom_ipcc *ipcc = data;
+       u32 hwirq;
+       int virq;
+
+       for (;;) {
+               hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+               if (hwirq == IPCC_NO_PENDING_IRQ)
+                       break;
+
+               virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+               writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+               generic_handle_irq(virq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+       struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+       irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+       writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+       struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+       irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+       writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+       .name = "ipcc",
+       .irq_mask = qcom_ipcc_mask_irq,
+       .irq_unmask = qcom_ipcc_unmask_irq,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+                               irq_hw_number_t hw)
+{
+       struct qcom_ipcc *ipcc = d->host_data;
+
+       irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, ipcc);
+       irq_set_noprobe(irq);
+
+       return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+                                 struct device_node *node, const u32 *intspec,
+                                 unsigned int intsize,
+                                 unsigned long *out_hwirq,
+                                 unsigned int *out_type)
+{
+       if (intsize != 3)
+               return -EINVAL;
+
+       *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+       return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+       .map = qcom_ipcc_domain_map,
+       .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+       struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+       struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+       u32 hwirq;
+
+       hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+       writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+       return 0;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+                                       const struct of_phandle_args *ph)
+{
+       struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+       struct qcom_ipcc_chan_info *mchan;
+       struct mbox_chan *chan;
+       unsigned int i;
+
+       if (ph->args_count != 2)
+               return ERR_PTR(-EINVAL);
+
+       for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
+               chan = &ipcc->chan[i];
+               if (!chan->con_priv) {
+                       mchan = &ipcc->mchan[i];
+                       mchan->client_id = ph->args[0];
+                       mchan->signal_id = ph->args[1];
+                       chan->con_priv = mchan;
+                       break;
+               }
+
+               chan = NULL;
+       }
+
+       return chan ?: ERR_PTR(-EBUSY);
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+       .send_data = qcom_ipcc_mbox_send_data,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+{
+       struct mbox_controller *mbox;
+       struct device *dev = ipcc->dev;
+
+       mbox = &ipcc->mbox;
+       mbox->dev = dev;
+       mbox->num_chans = IPCC_MBOX_MAX_CHAN;
+       mbox->chans = ipcc->chan;
+       mbox->ops = &ipcc_mbox_chan_ops;
+       mbox->of_xlate = qcom_ipcc_mbox_xlate;
+       mbox->txdone_irq = false;
+       mbox->txdone_poll = false;
+
+       return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+       struct qcom_ipcc *ipcc;
+       int ret;
+
+       ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+       if (!ipcc)
+               return -ENOMEM;
+
+       ipcc->dev = &pdev->dev;
+
+       ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(ipcc->base))
+               return PTR_ERR(ipcc->base);
+
+       ipcc->irq = platform_get_irq(pdev, 0);
+       if (ipcc->irq < 0)
+               return ipcc->irq;
+
+       ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+                                              &qcom_ipcc_irq_ops, ipcc);
+       if (!ipcc->irq_domain)
+               return -ENOMEM;
+
+       ret = qcom_ipcc_setup_mbox(ipcc);
+       if (ret)
+               goto err_mbox;
+
+       ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+                              IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+               goto err_mbox;
+       }
+
+       enable_irq_wake(ipcc->irq);
+       platform_set_drvdata(pdev, ipcc);
+
+       return 0;
+
+err_mbox:
+       irq_domain_remove(ipcc->irq_domain);
+
+       return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+       struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+       disable_irq_wake(ipcc->irq);
+       irq_domain_remove(ipcc->irq_domain);
+
+       return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+       { .compatible = "qcom,ipcc"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static struct platform_driver qcom_ipcc_driver = {
+       .probe = qcom_ipcc_probe,
+       .remove = qcom_ipcc_remove,
+       .driver = {
+               .name = "qcom-ipcc",
+               .of_match_table = qcom_ipcc_of_match,
+       },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+       return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644 (file)
index 0000000..f6fab24
--- /dev/null
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID           0x0
+#define SPRD_MBOX_MSG_LOW      0x4
+#define SPRD_MBOX_MSG_HIGH     0x8
+#define SPRD_MBOX_TRIGGER      0xc
+#define SPRD_MBOX_FIFO_RST     0x10
+#define SPRD_MBOX_FIFO_STS     0x14
+#define SPRD_MBOX_IRQ_STS      0x18
+#define SPRD_MBOX_IRQ_MSK      0x1c
+#define SPRD_MBOX_LOCK         0x20
+#define SPRD_MBOX_FIFO_DEPTH   0x24
+
+/* Bit and mask definitions for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK           GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK           GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT          16
+#define SPRD_INBOX_FIFO_BUSY_MASK              GENMASK(7, 0)
+
+/* Bit and mask definitions for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR                      BIT(0)
+
+/* Bit and mask definitions for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL                  BIT(0)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT              16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT              24
+#define SPRD_OUTBOX_FIFO_POS_MASK              GENMASK(7, 0)
+
+/* Bit and mask definitions for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ              BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ           BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ            BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK               GENMASK(2, 0)
+
+/* Bit and mask definitions for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ         BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK              GENMASK(4, 0)
+
+#define SPRD_MBOX_CHAN_MAX                     8
+
+struct sprd_mbox_priv {
+       struct mbox_controller  mbox;
+       struct device           *dev;
+       void __iomem            *inbox_base;
+       void __iomem            *outbox_base;
+       struct clk              *clk;
+       u32                     outbox_fifo_depth;
+
+       struct mbox_chan        chan[SPRD_MBOX_CHAN_MAX];
+};
+
+/* Map the generic mbox_controller embedded in sprd_mbox_priv back to its container */
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+       return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+/*
+ * Compute the number of pending messages in the outbox FIFO from the
+ * hardware status word: the write and read positions are extracted from
+ * bits [23:16] and [31:24] of @fifo_sts respectively, and the FIFO FULL
+ * flag disambiguates the wrap-around case.
+ */
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+       u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+               SPRD_OUTBOX_FIFO_POS_MASK;
+       u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+               SPRD_OUTBOX_FIFO_POS_MASK;
+       u32 fifo_len;
+
+       /*
+        * If the read pointer is equal to the write pointer, the FIFO is
+        * either completely full or completely empty; the FULL status bit
+        * tells the two cases apart.
+        */
+       if (wr_pos == rd_pos) {
+               if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+                       fifo_len = priv->outbox_fifo_depth;
+               else
+                       fifo_len = 0;
+       } else if (wr_pos > rd_pos) {
+               fifo_len = wr_pos - rd_pos;
+       } else {
+               /* Write pointer has wrapped past the end of the FIFO */
+               fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+       }
+
+       return fifo_len;
+}
+
+/*
+ * Outbox (receive) interrupt handler: drain every message currently
+ * queued in the outbox FIFO, delivering each 8-byte payload to the
+ * channel matching the sender core id via mbox_chan_received_data().
+ */
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+       struct sprd_mbox_priv *priv = data;
+       struct mbox_chan *chan;
+       u32 fifo_sts, fifo_len, msg[2];
+       int i, id;
+
+       fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS);
+
+       fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+       if (!fifo_len) {
+               /* Nothing pending: the IRQ was not for us */
+               dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+               return IRQ_NONE;
+       }
+
+       for (i = 0; i < fifo_len; i++) {
+               /* Reading MSG_LOW/MSG_HIGH/ID pops one entry's fields */
+               msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW);
+               msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH);
+               id = readl(priv->outbox_base + SPRD_MBOX_ID);
+
+               /*
+                * NOTE(review): id comes straight from hardware and indexes
+                * chan[] without a range check against SPRD_MBOX_CHAN_MAX —
+                * confirm the IP can only report ids 0..7.
+                */
+               chan = &priv->chan[id];
+               mbox_chan_received_data(chan, (void *)msg);
+
+               /* Trigger to update outbox FIFO pointer */
+               writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+       }
+
+       /* Clear irq status after reading all message. */
+       writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Inbox (transmit-side) interrupt handler: for every channel whose
+ * delivery bit is set, report TX completion if the remote core has
+ * already fetched the message (busy bit clear), then acknowledge the
+ * delivery/overflow status and the interrupt.
+ */
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+       struct sprd_mbox_priv *priv = data;
+       struct mbox_chan *chan;
+       u32 fifo_sts, send_sts, busy, id;
+
+       fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+       /* Get the inbox data delivery status */
+       send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+               SPRD_INBOX_FIFO_DELIVER_SHIFT;
+       if (!send_sts) {
+               dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+               return IRQ_NONE;
+       }
+
+       /* Walk the delivery bitmap, one set bit per completed channel */
+       while (send_sts) {
+               id = __ffs(send_sts);
+               send_sts &= (send_sts - 1);
+
+               chan = &priv->chan[id];
+
+               /*
+                * Check if the message was fetched by the remote target; if
+                * yes, that means the transmission has been completed.
+                */
+               busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+               if (!(busy & BIT(id)))
+                       mbox_chan_txdone(chan, 0);
+       }
+
+       /* Clear FIFO delivery and overflow status */
+       writel(fifo_sts &
+              (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK),
+              priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+       /* Clear irq status */
+       writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * mbox_chan_ops.send_data: push one 8-byte message into the inbox FIFO
+ * addressed to the target core identified by the channel's con_priv.
+ * TX completion is signalled asynchronously by sprd_mbox_inbox_isr().
+ */
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+       struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+       unsigned long id = (unsigned long)chan->con_priv;
+       u32 *data = msg;
+
+       /* Write data into inbox FIFO, and only support 8 bytes every time */
+       writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+       writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+       /* Set target core id */
+       writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+       /* Trigger remote request */
+       writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+       return 0;
+}
+
+/*
+ * mbox_chan_ops.flush: busy-poll the inbox busy bit for this channel
+ * until the remote core fetches the pending message, or @timeout (ms)
+ * expires.  Returns 0 once TX completion has been reported, -ETIME on
+ * timeout.  Note @timeout is reused in place as a jiffies deadline.
+ */
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+       struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+       unsigned long id = (unsigned long)chan->con_priv;
+       u32 busy;
+
+       timeout = jiffies + msecs_to_jiffies(timeout);
+
+       while (time_before(jiffies, timeout)) {
+               busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+                       SPRD_INBOX_FIFO_BUSY_MASK;
+               if (!(busy & BIT(id))) {
+                       /* Remote side consumed the message: report txdone */
+                       mbox_chan_txdone(chan, 0);
+                       return 0;
+               }
+
+               udelay(1);
+       }
+
+       return -ETIME;
+}
+
+/*
+ * mbox_chan_ops.startup: put the mailbox hardware into its working
+ * state for a channel — reset the outbox FIFO and unmask the interrupts
+ * both ISRs rely on.  Runs once per channel open; the register writes
+ * are idempotent, so repeated calls are harmless.
+ */
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+       struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+       u32 val;
+
+       /* Select outbox FIFO mode and reset the outbox FIFO status */
+       writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+       /* Enable inbox FIFO overflow and delivery interrupt */
+       val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+       val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+       writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+       /* Enable outbox FIFO not empty interrupt */
+       val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+       val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+       writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+       return 0;
+}
+
+/*
+ * mbox_chan_ops.shutdown: mask all inbox and outbox interrupts.  Note
+ * this masks IRQs globally for the controller, not per channel.
+ */
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+       struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+       /* Disable inbox & outbox interrupt */
+       writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+       writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+       .send_data      = sprd_mbox_send_data,
+       .flush          = sprd_mbox_flush,
+       .startup        = sprd_mbox_startup,
+       .shutdown       = sprd_mbox_shutdown,
+};
+
+/* devm action callback: ungate the mailbox "enable" clock on unbind/probe failure */
+static void sprd_mbox_disable(void *data)
+{
+       struct sprd_mbox_priv *priv = data;
+
+       clk_disable_unprepare(priv->clk);
+}
+
+/*
+ * Probe: map the inbox/outbox register banks, enable the controller
+ * clock (with an automatic devm disable action), request both IRQs,
+ * read the outbox FIFO depth from hardware, and register an 8-channel
+ * mailbox controller.  All resources are devm-managed, so no explicit
+ * error unwinding or remove() is needed.
+ */
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sprd_mbox_priv *priv;
+       int ret, inbox_irq, outbox_irq;
+       unsigned long id;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->dev = dev;
+
+       /*
+        * The Spreadtrum mailbox uses an inbox to send messages to the target
+        * core, and uses an outbox to receive messages from other cores.
+        *
+        * Thus the mailbox controller supplies 2 different register addresses
+        * and IRQ numbers for inbox and outbox.
+        */
+       priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->inbox_base))
+               return PTR_ERR(priv->inbox_base);
+
+       priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+       if (IS_ERR(priv->outbox_base))
+               return PTR_ERR(priv->outbox_base);
+
+       priv->clk = devm_clk_get(dev, "enable");
+       if (IS_ERR(priv->clk)) {
+               dev_err(dev, "failed to get mailbox clock\n");
+               return PTR_ERR(priv->clk);
+       }
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
+
+       /* Ensure the clock is disabled again on unbind or probe failure */
+       ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+       if (ret) {
+               dev_err(dev, "failed to add mailbox disable action\n");
+               return ret;
+       }
+
+       inbox_irq = platform_get_irq(pdev, 0);
+       if (inbox_irq < 0)
+               return inbox_irq;
+
+       ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+                              IRQF_NO_SUSPEND, dev_name(dev), priv);
+       if (ret) {
+               dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+               return ret;
+       }
+
+       outbox_irq = platform_get_irq(pdev, 1);
+       if (outbox_irq < 0)
+               return outbox_irq;
+
+       ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+                              IRQF_NO_SUSPEND, dev_name(dev), priv);
+       if (ret) {
+               dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+               return ret;
+       }
+
+       /* Get the default outbox FIFO depth (register holds depth - 1) */
+       priv->outbox_fifo_depth =
+               readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+       priv->mbox.dev = dev;
+       priv->mbox.chans = &priv->chan[0];
+       priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+       priv->mbox.ops = &sprd_mbox_ops;
+       /* TX completion is reported from the inbox ISR, not by polling */
+       priv->mbox.txdone_irq = true;
+
+       /* Each channel's con_priv encodes the target core id it talks to */
+       for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+               priv->chan[id].con_priv = (void *)id;
+
+       ret = devm_mbox_controller_register(dev, &priv->mbox);
+       if (ret) {
+               dev_err(dev, "failed to register mailbox: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Devicetree match table: binds to "sprd,sc9860-mailbox" nodes */
+static const struct of_device_id sprd_mbox_of_match[] = {
+       { .compatible = "sprd,sc9860-mailbox", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+/* Platform driver glue; all teardown is devm-managed, so no .remove */
+static struct platform_driver sprd_mbox_driver = {
+       .driver = {
+               .name = "sprd-mailbox",
+               .of_match_table = sprd_mbox_of_match,
+       },
+       .probe  = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
index 86887c9..f44079d 100644 (file)
@@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
                mchan->req_buf_size = resource_size(&res);
                mchan->req_buf = devm_ioremap(mdev, res.start,
                                              mchan->req_buf_size);
-               if (IS_ERR(mchan->req_buf)) {
+               if (!mchan->req_buf) {
                        dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
-                       ret = PTR_ERR(mchan->req_buf);
-                       return ret;
+                       return -ENOMEM;
                }
        } else if (ret != -ENODEV) {
                dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
@@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
                mchan->resp_buf_size = resource_size(&res);
                mchan->resp_buf = devm_ioremap(mdev, res.start,
                                               mchan->resp_buf_size);
-               if (IS_ERR(mchan->resp_buf)) {
+               if (!mchan->resp_buf) {
                        dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
-                       ret = PTR_ERR(mchan->resp_buf);
-                       return ret;
+                       return -ENOMEM;
                }
        } else if (ret != -ENODEV) {
                dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
                mchan->req_buf_size = resource_size(&res);
                mchan->req_buf = devm_ioremap(mdev, res.start,
                                              mchan->req_buf_size);
-               if (IS_ERR(mchan->req_buf)) {
+               if (!mchan->req_buf) {
                        dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
-                       ret = PTR_ERR(mchan->req_buf);
-                       return ret;
+                       return -ENOMEM;
                }
        } else if (ret != -ENODEV) {
                dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
                mchan->resp_buf_size = resource_size(&res);
                mchan->resp_buf = devm_ioremap(mdev, res.start,
                                               mchan->resp_buf_size);
-               if (IS_ERR(mchan->resp_buf)) {
+               if (!mchan->resp_buf) {
                        dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
-                       ret = PTR_ERR(mchan->resp_buf);
-                       return ret;
+                       return -ENOMEM;
                }
        } else if (ret != -ENODEV) {
                dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -668,10 +664,9 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
 
        /* IPI IRQ */
        ret = platform_get_irq(pdev, 0);
-       if (ret < 0) {
-               dev_err(dev, "unable to find IPI IRQ.\n");
+       if (ret < 0)
                goto free_mbox_dev;
-       }
+
        pdata->irq = ret;
        ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
                               IRQF_SHARED, dev_name(dev), pdata);
index 3bfe72c..8f201d0 100644 (file)
@@ -116,7 +116,7 @@ config MIC_COSM
 
 config VOP
        tristate "VOP Driver"
-       depends on VOP_BUS && VHOST_DPN
+       depends on VOP_BUS
        select VHOST_RING
        select VIRTIO
        help
index 142c0f9..42001c4 100644 (file)
@@ -420,8 +420,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                extra_size = 0;
 
                /* Protection Register info */
-               extra_size += (extp->NumProtectionFields - 1) *
-                             sizeof(struct cfi_intelext_otpinfo);
+               if (extp->NumProtectionFields)
+                       extra_size += (extp->NumProtectionFields - 1) *
+                                     sizeof(struct cfi_intelext_otpinfo);
        }
 
        if (extp->MinorVersion >= '1') {
@@ -695,14 +696,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
+               int offs = 0;
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
-               int offs, numregions, numparts, partshift, numvirtchips, i, j;
+               int numregions, numparts, partshift, numvirtchips, i, j;
 
                /* Protection Register info */
-               offs = (extp->NumProtectionFields - 1) *
-                      sizeof(struct cfi_intelext_otpinfo);
+               if (extp->NumProtectionFields)
+                       offs = (extp->NumProtectionFields - 1) *
+                              sizeof(struct cfi_intelext_otpinfo);
 
                /* Burst Read info */
                offs += extp->extra[offs+1]+2;
index eb0f460..a030792 100644 (file)
@@ -647,7 +647,7 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
 
        for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
                ecc[i] = bitrev8(hwecc[i]);
-       numerrs = decode_bch(docg3->cascade->bch, NULL,
+       numerrs = bch_decode(docg3->cascade->bch, NULL,
                             DOC_ECC_BCH_COVERED_BYTES,
                             NULL, ecc, NULL, errorpos);
        BUG_ON(numerrs == -EINVAL);
@@ -1984,8 +1984,8 @@ static int __init docg3_probe(struct platform_device *pdev)
                return ret;
        cascade->base = base;
        mutex_init(&cascade->lock);
-       cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
-                            DOC_ECC_BCH_PRIMPOLY);
+       cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+                               DOC_ECC_BCH_PRIMPOLY, false);
        if (!cascade->bch)
                return ret;
 
@@ -2021,7 +2021,7 @@ notfound:
        ret = -ENODEV;
        dev_info(dev, "No supported DiskOnChip found\n");
 err_probe:
-       free_bch(cascade->bch);
+       bch_free(cascade->bch);
        for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
                if (cascade->floors[floor])
                        doc_release_device(cascade->floors[floor]);
@@ -2045,7 +2045,7 @@ static int docg3_release(struct platform_device *pdev)
                if (cascade->floors[floor])
                        doc_release_device(cascade->floors[floor]);
 
-       free_bch(docg3->cascade->bch);
+       bch_free(docg3->cascade->bch);
        return 0;
 }
 
index a289c8b..d4a46e1 100644 (file)
 
 #define FLASH_PARALLEL_HIGH_PIN_CNT    (1 << 20)       /* else low pin cnt */
 
-static const struct of_device_id syscon_match[] = {
-       { .compatible = "cortina,gemini-syscon" },
-       { },
-};
-
 struct gemini_flash {
        struct device *dev;
        struct pinctrl *p;
index 078e0f6..32e52d8 100644 (file)
@@ -89,8 +89,6 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
 
        ret = erase_write (mtd, mtdblk->cache_offset,
                           mtdblk->cache_size, mtdblk->cache_data);
-       if (ret)
-               return ret;
 
        /*
         * Here we could arguably set the cache state to STATE_CLEAN.
@@ -98,9 +96,14 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
         * be notified if this content is altered on the flash by other
         * means.  Let's declare it empty and leave buffering tasks to
         * the buffer cache instead.
+        *
+        * If this cache_offset points to a bad block, data cannot be
+        * written to the device. Clear cache_state to avoid writing to
+        * bad blocks repeatedly.
         */
-       mtdblk->cache_state = STATE_EMPTY;
-       return 0;
+       if (ret == 0 || ret == -EIO)
+               mtdblk->cache_state = STATE_EMPTY;
+       return ret;
 }
 
 
index b47691e..76d832a 100644 (file)
@@ -617,6 +617,19 @@ int add_mtd_device(struct mtd_info *mtd)
                    !(mtd->flags & MTD_NO_ERASE)))
                return -EINVAL;
 
+       /*
+        * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
+        * master is an MLC NAND and has a proper pairing scheme defined.
+        * We also reject masters that implement ->_writev() for now, because
+        * NAND controller drivers don't implement this hook, and adding the
+        * SLC -> MLC address/length conversion to this path is useless if we
+        * don't have a user.
+        */
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
+           (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
+            !master->pairing || master->_writev))
+               return -EINVAL;
+
        mutex_lock(&mtd_table_mutex);
 
        i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -632,6 +645,14 @@ int add_mtd_device(struct mtd_info *mtd)
        if (mtd->bitflip_threshold == 0)
                mtd->bitflip_threshold = mtd->ecc_strength;
 
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+               int ngroups = mtd_pairing_groups(master);
+
+               mtd->erasesize /= ngroups;
+               mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
+                           mtd->erasesize;
+       }
+
        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
@@ -1074,9 +1095,11 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
        struct mtd_info *master = mtd_get_master(mtd);
        u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+       struct erase_info adjinstr;
        int ret;
 
        instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+       adjinstr = *instr;
 
        if (!mtd->erasesize || !master->_erase)
                return -ENOTSUPP;
@@ -1091,12 +1114,27 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
 
        ledtrig_mtd_activity();
 
-       instr->addr += mst_ofs;
-       ret = master->_erase(master, instr);
-       if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
-               instr->fail_addr -= mst_ofs;
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+               adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
+                               master->erasesize;
+               adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
+                               master->erasesize) -
+                              adjinstr.addr;
+       }
+
+       adjinstr.addr += mst_ofs;
+
+       ret = master->_erase(master, &adjinstr);
+
+       if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
+               instr->fail_addr = adjinstr.fail_addr - mst_ofs;
+               if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+                       instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
+                                                        master);
+                       instr->fail_addr *= mtd->erasesize;
+               }
+       }
 
-       instr->addr -= mst_ofs;
        return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_erase);
@@ -1276,6 +1314,101 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
        return 0;
 }
 
+/*
+ * Plain (non-emulated) OOB read path: translate @from to the master
+ * device's offset space and dispatch to the master's _read_oob hook,
+ * falling back to _read when no OOB hook exists (callers have already
+ * checked that ops->oobbuf is NULL in that case).
+ */
+static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
+                           struct mtd_oob_ops *ops)
+{
+       struct mtd_info *master = mtd_get_master(mtd);
+       int ret;
+
+       from = mtd_get_master_ofs(mtd, from);
+       if (master->_read_oob)
+               ret = master->_read_oob(master, from, ops);
+       else
+               ret = master->_read(master, from, ops->len, &ops->retlen,
+                                   ops->datbuf);
+
+       return ret;
+}
+
+/*
+ * Plain (non-emulated) OOB write path: mirror of mtd_read_oob_std() —
+ * translate @to to the master's offset space and use _write_oob, or
+ * fall back to _write when the master has no OOB hook.
+ */
+static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
+                            struct mtd_oob_ops *ops)
+{
+       struct mtd_info *master = mtd_get_master(mtd);
+       int ret;
+
+       to = mtd_get_master_ofs(mtd, to);
+       if (master->_write_oob)
+               ret = master->_write_oob(master, to, ops);
+       else
+               ret = master->_write(master, to, ops->len, &ops->retlen,
+                                    ops->datbuf);
+
+       return ret;
+}
+
+/*
+ * I/O path for MTD_SLC_ON_MLC_EMULATION partitions: the partition
+ * exposes an SLC-like view, so each logical page at @start must be
+ * remapped to the physical write unit of its pairing group on the MLC
+ * master.  Walks page by page, translating offsets via the master's
+ * pairing info and issuing per-page reads/writes until both the data
+ * and OOB requests in @ops are satisfied.
+ *
+ * Returns max bitflips seen across pages for reads (ECC convention),
+ * 0 for writes, or a negative error code.
+ */
+static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
+                              struct mtd_oob_ops *ops)
+{
+       struct mtd_info *master = mtd_get_master(mtd);
+       int ngroups = mtd_pairing_groups(master);
+       int npairs = mtd_wunit_per_eb(master) / ngroups;
+       struct mtd_oob_ops adjops = *ops;
+       unsigned int wunit, oobavail;
+       struct mtd_pairing_info info;
+       int max_bitflips = 0;
+       u32 ebofs, pageofs;
+       loff_t base, pos;
+
+       /* Split @start into (emulated) eraseblock base + pair/page offset */
+       ebofs = mtd_mod_by_eb(start, mtd);
+       base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
+       info.group = 0;
+       info.pair = mtd_div_by_ws(ebofs, mtd);
+       pageofs = mtd_mod_by_ws(ebofs, mtd);
+       oobavail = mtd_oobavail(mtd, ops);
+
+       while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+               int ret;
+
+               /* Crossed into the next physical eraseblock on the master */
+               if (info.pair >= npairs) {
+                       info.pair = 0;
+                       base += master->erasesize;
+               }
+
+               /* Translate (group 0, pair) to the master's write unit */
+               wunit = mtd_pairing_info_to_wunit(master, &info);
+               pos = mtd_wunit_to_offset(mtd, base, wunit);
+
+               /* Clamp this iteration's data chunk to the current page */
+               adjops.len = ops->len - ops->retlen;
+               if (adjops.len > mtd->writesize - pageofs)
+                       adjops.len = mtd->writesize - pageofs;
+
+               adjops.ooblen = ops->ooblen - ops->oobretlen;
+               if (adjops.ooblen > oobavail - adjops.ooboffs)
+                       adjops.ooblen = oobavail - adjops.ooboffs;
+
+               if (read) {
+                       ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
+                       if (ret > 0)
+                               max_bitflips = max(max_bitflips, ret);
+               } else {
+                       ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
+               }
+
+               if (ret < 0)
+                       return ret;
+
+               /*
+                * NOTE(review): for reads, max_bitflips was already folded
+                * in above, so this second update is redundant (harmless —
+                * max() is idempotent); for writes ret is 0 here.
+                */
+               max_bitflips = max(max_bitflips, ret);
+               ops->retlen += adjops.retlen;
+               ops->oobretlen += adjops.oobretlen;
+               adjops.datbuf += adjops.retlen;
+               adjops.oobbuf += adjops.oobretlen;
+               /* ooboffs/pageofs only apply to the first page of the span */
+               adjops.ooboffs = 0;
+               pageofs = 0;
+               info.pair++;
+       }
+
+       return max_bitflips;
+}
+
 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
        struct mtd_info *master = mtd_get_master(mtd);
@@ -1294,12 +1427,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
        if (!master->_read_oob && (!master->_read || ops->oobbuf))
                return -EOPNOTSUPP;
 
-       from = mtd_get_master_ofs(mtd, from);
-       if (master->_read_oob)
-               ret_code = master->_read_oob(master, from, ops);
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+               ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
        else
-               ret_code = master->_read(master, from, ops->len, &ops->retlen,
-                                        ops->datbuf);
+               ret_code = mtd_read_oob_std(mtd, from, ops);
 
        mtd_update_ecc_stats(mtd, master, &old_stats);
 
@@ -1338,13 +1469,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
        if (!master->_write_oob && (!master->_write || ops->oobbuf))
                return -EOPNOTSUPP;
 
-       to = mtd_get_master_ofs(mtd, to);
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+               return mtd_io_emulated_slc(mtd, to, false, ops);
 
-       if (master->_write_oob)
-               return master->_write_oob(master, to, ops);
-       else
-               return master->_write(master, to, ops->len, &ops->retlen,
-                                     ops->datbuf);
+       return mtd_write_oob_std(mtd, to, ops);
 }
 EXPORT_SYMBOL_GPL(mtd_write_oob);
 
@@ -1672,7 +1800,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
  * @start: first ECC byte to set
  * @nbytes: number of ECC bytes to set
  *
- * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
  *
  * Returns zero on success, a negative error code otherwise.
  */
@@ -1817,6 +1945,12 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                return -EINVAL;
        if (!len)
                return 0;
+
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+               len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+       }
+
        return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_lock);
@@ -1831,6 +1965,12 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                return -EINVAL;
        if (!len)
                return 0;
+
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+               len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+       }
+
        return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_unlock);
@@ -1845,6 +1985,12 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
                return -EINVAL;
        if (!len)
                return 0;
+
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+               len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+       }
+
        return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_is_locked);
@@ -1857,6 +2003,10 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
                return -EINVAL;
        if (!master->_block_isreserved)
                return 0;
+
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
        return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
 }
 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
@@ -1869,6 +2019,10 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
                return -EINVAL;
        if (!master->_block_isbad)
                return 0;
+
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
        return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
 }
 EXPORT_SYMBOL_GPL(mtd_block_isbad);
@@ -1885,6 +2039,9 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
 
+       if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+               ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
        ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
        if (ret)
                return ret;
index 3f60256..c3575b6 100644 (file)
@@ -35,9 +35,12 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
                                           const struct mtd_partition *part,
                                           int partno, uint64_t cur_offset)
 {
-       int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
-                                                           parent->erasesize;
-       struct mtd_info *child, *master = mtd_get_master(parent);
+       struct mtd_info *master = mtd_get_master(parent);
+       int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
+                          master->writesize : master->erasesize;
+       u64 parent_size = mtd_is_partition(parent) ?
+                         parent->part.size : parent->size;
+       struct mtd_info *child;
        u32 remainder;
        char *name;
        u64 tmp;
@@ -56,8 +59,9 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
        /* set up the MTD object for this partition */
        child->type = parent->type;
        child->part.flags = parent->flags & ~part->mask_flags;
+       child->part.flags |= part->add_flags;
        child->flags = child->part.flags;
-       child->size = part->size;
+       child->part.size = part->size;
        child->writesize = parent->writesize;
        child->writebufsize = parent->writebufsize;
        child->oobsize = parent->oobsize;
@@ -98,29 +102,29 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
        }
        if (child->part.offset == MTDPART_OFS_RETAIN) {
                child->part.offset = cur_offset;
-               if (parent->size - child->part.offset >= child->size) {
-                       child->size = parent->size - child->part.offset -
-                                     child->size;
+               if (parent_size - child->part.offset >= child->part.size) {
+                       child->part.size = parent_size - child->part.offset -
+                                          child->part.size;
                } else {
                        printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
-                               part->name, parent->size - child->part.offset,
-                               child->size);
+                               part->name, parent_size - child->part.offset,
+                               child->part.size);
                        /* register to preserve ordering */
                        goto out_register;
                }
        }
-       if (child->size == MTDPART_SIZ_FULL)
-               child->size = parent->size - child->part.offset;
+       if (child->part.size == MTDPART_SIZ_FULL)
+               child->part.size = parent_size - child->part.offset;
 
        printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
-              child->part.offset, child->part.offset + child->size,
+              child->part.offset, child->part.offset + child->part.size,
               child->name);
 
        /* let's do some sanity checks */
-       if (child->part.offset >= parent->size) {
+       if (child->part.offset >= parent_size) {
                /* let's register it anyway to preserve ordering */
                child->part.offset = 0;
-               child->size = 0;
+               child->part.size = 0;
 
                /* Initialize ->erasesize to make add_mtd_device() happy. */
                child->erasesize = parent->erasesize;
@@ -128,15 +132,16 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
                        part->name);
                goto out_register;
        }
-       if (child->part.offset + child->size > parent->size) {
-               child->size = parent->size - child->part.offset;
+       if (child->part.offset + child->part.size > parent->size) {
+               child->part.size = parent_size - child->part.offset;
                printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
-                       part->name, parent->name, child->size);
+                       part->name, parent->name, child->part.size);
        }
+
        if (parent->numeraseregions > 1) {
                /* Deal with variable erase size stuff */
                int i, max = parent->numeraseregions;
-               u64 end = child->part.offset + child->size;
+               u64 end = child->part.offset + child->part.size;
                struct mtd_erase_region_info *regions = parent->eraseregions;
 
                /* Find the first erase regions which is part of this
@@ -156,7 +161,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
                BUG_ON(child->erasesize == 0);
        } else {
                /* Single erase size */
-               child->erasesize = parent->erasesize;
+               child->erasesize = master->erasesize;
        }
 
        /*
@@ -178,7 +183,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
                        part->name);
        }
 
-       tmp = mtd_get_master_ofs(child, 0) + child->size;
+       tmp = mtd_get_master_ofs(child, 0) + child->part.size;
        remainder = do_div(tmp, wr_alignment);
        if ((child->flags & MTD_WRITEABLE) && remainder) {
                child->flags &= ~MTD_WRITEABLE;
@@ -186,6 +191,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
                        part->name);
        }
 
+       child->size = child->part.size;
        child->ecc_step_size = parent->ecc_step_size;
        child->ecc_strength = parent->ecc_strength;
        child->bitflip_threshold = parent->bitflip_threshold;
@@ -193,7 +199,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
        if (master->_block_isbad) {
                uint64_t offs = 0;
 
-               while (offs < child->size) {
+               while (offs < child->part.size) {
                        if (mtd_block_isreserved(child, offs))
                                child->ecc_stats.bbtblocks++;
                        else if (mtd_block_isbad(child, offs))
@@ -234,6 +240,8 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
                      long long offset, long long length)
 {
        struct mtd_info *master = mtd_get_master(parent);
+       u64 parent_size = mtd_is_partition(parent) ?
+                         parent->part.size : parent->size;
        struct mtd_partition part;
        struct mtd_info *child;
        int ret = 0;
@@ -244,7 +252,7 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
                return -EINVAL;
 
        if (length == MTDPART_SIZ_FULL)
-               length = parent->size - offset;
+               length = parent_size - offset;
 
        if (length <= 0)
                return -EINVAL;
@@ -419,7 +427,7 @@ int add_mtd_partitions(struct mtd_info *parent,
                /* Look for subpartitions */
                parse_mtd_partitions(child, parts[i].types, NULL);
 
-               cur_offset = child->part.offset + child->size;
+               cur_offset = child->part.offset + child->part.size;
        }
 
        return 0;
index a80a46b..113f610 100644 (file)
@@ -213,10 +213,6 @@ config MTD_NAND_MLC_LPC32XX
          Please check the actual NAND chip connected and its support
          by the MLC NAND controller.
 
-config MTD_NAND_CM_X270
-       tristate "CM-X270 modules NAND controller"
-       depends on MACH_ARMCORE
-
 config MTD_NAND_PASEMI
        tristate "PA Semi PWRficient NAND controller"
        depends on PPC_PASEMI
@@ -457,6 +453,14 @@ config MTD_NAND_CADENCE
          Enable the driver for NAND flash on platforms using a Cadence NAND
          controller.
 
+config MTD_NAND_ARASAN
+       tristate "Support for Arasan NAND flash controller"
+       depends on HAS_IOMEM && HAS_DMA
+       select BCH
+       help
+         Enables the driver for the Arasan NAND flash controller on
+         Zynq Ultrascale+ MPSoC.
+
 comment "Misc"
 
 config MTD_SM_COMMON
index 2d136b1..2930f5b 100644 (file)
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_NAND_GPIO)           += gpio.o
 omap2_nand-objs := omap2.o
 obj-$(CONFIG_MTD_NAND_OMAP2)           += omap2_nand.o
 obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD)  += omap_elm.o
-obj-$(CONFIG_MTD_NAND_CM_X270)         += cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_MARVELL)         += marvell_nand.o
 obj-$(CONFIG_MTD_NAND_TMIO)            += tmio_nand.o
 obj-$(CONFIG_MTD_NAND_PLATFORM)                += plat_nand.o
@@ -58,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_TEGRA)          += tegra_nand.o
 obj-$(CONFIG_MTD_NAND_STM32_FMC2)      += stm32_fmc2_nand.o
 obj-$(CONFIG_MTD_NAND_MESON)           += meson_nand.o
 obj-$(CONFIG_MTD_NAND_CADENCE)         += cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN)          += arasan-nand-controller.o
 
 nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
 nand-objs += nand_onfi.o
index d66dab2..3711e7a 100644 (file)
@@ -387,12 +387,15 @@ static int gpio_nand_remove(struct platform_device *pdev)
 {
        struct gpio_nand *priv = platform_get_drvdata(pdev);
        struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+       int ret;
 
        /* Apply write protection */
        gpiod_set_value(priv->gpiod_nwp, 1);
 
        /* Unregister device */
-       nand_release(mtd_to_nand(mtd));
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(mtd_to_nand(mtd));
 
        return 0;
 }
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
new file mode 100644 (file)
index 0000000..7141dcc
--- /dev/null
@@ -0,0 +1,1297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2020 Xilinx, Inc.
+ * Author:
+ *   Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (fully rewritten):
+ *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ *   Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG                                0x00
+#define   PKT_SIZE(x)                  FIELD_PREP(GENMASK(10, 0), (x))
+#define   PKT_STEPS(x)                 FIELD_PREP(GENMASK(23, 12), (x))
+
+#define MEM_ADDR1_REG                  0x04
+
+#define MEM_ADDR2_REG                  0x08
+#define   ADDR2_STRENGTH(x)            FIELD_PREP(GENMASK(27, 25), (x))
+#define   ADDR2_CS(x)                  FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG                                0x0C
+#define   CMD_1(x)                     FIELD_PREP(GENMASK(7, 0), (x))
+#define   CMD_2(x)                     FIELD_PREP(GENMASK(15, 8), (x))
+#define   CMD_PAGE_SIZE(x)             FIELD_PREP(GENMASK(25, 23), (x))
+#define   CMD_DMA_ENABLE               BIT(27)
+#define   CMD_NADDRS(x)                        FIELD_PREP(GENMASK(30, 28), (x))
+#define   CMD_ECC_ENABLE               BIT(31)
+
+#define PROG_REG                       0x10
+#define   PROG_PGRD                    BIT(0)
+#define   PROG_ERASE                   BIT(2)
+#define   PROG_STATUS                  BIT(3)
+#define   PROG_PGPROG                  BIT(4)
+#define   PROG_RDID                    BIT(6)
+#define   PROG_RDPARAM                 BIT(7)
+#define   PROG_RST                     BIT(8)
+#define   PROG_GET_FEATURE             BIT(9)
+#define   PROG_SET_FEATURE             BIT(10)
+
+#define INTR_STS_EN_REG                        0x14
+#define INTR_SIG_EN_REG                        0x18
+#define INTR_STS_REG                   0x1C
+#define   WRITE_READY                  BIT(0)
+#define   READ_READY                   BIT(1)
+#define   XFER_COMPLETE                        BIT(2)
+#define   DMA_BOUNDARY                 BIT(6)
+#define   EVENT_MASK                   GENMASK(7, 0)
+
+#define READY_STS_REG                  0x20
+
+#define DMA_ADDR0_REG                  0x50
+#define DMA_ADDR1_REG                  0x24
+
+#define FLASH_STS_REG                  0x28
+
+#define DATA_PORT_REG                  0x30
+
+#define ECC_CONF_REG                   0x34
+#define   ECC_CONF_COL(x)              FIELD_PREP(GENMASK(15, 0), (x))
+#define   ECC_CONF_LEN(x)              FIELD_PREP(GENMASK(26, 16), (x))
+#define   ECC_CONF_BCH_EN              BIT(27)
+
+#define ECC_ERR_CNT_REG                        0x38
+#define   GET_PKT_ERR_CNT(x)           FIELD_GET(GENMASK(7, 0), (x))
+#define   GET_PAGE_ERR_CNT(x)          FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG                     0x3C
+#define   ECC_SP_CMD1(x)               FIELD_PREP(GENMASK(7, 0), (x))
+#define   ECC_SP_CMD2(x)               FIELD_PREP(GENMASK(15, 8), (x))
+#define   ECC_SP_ADDRS(x)              FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG               0x40
+#define ECC_2ERR_CNT_REG               0x44
+
+#define DATA_INTERFACE_REG             0x6C
+#define   DIFACE_SDR_MODE(x)           FIELD_PREP(GENMASK(2, 0), (x))
+/* Parameter must be lowercase 'x': with '(X)' the argument was never substituted */
+#define   DIFACE_DDR_MODE(x)           FIELD_PREP(GENMASK(5, 3), (x))
+#define   DIFACE_SDR                   0
+#define   DIFACE_NVDDR                 BIT(9)
+
+#define ANFC_MAX_CS                    2
+#define ANFC_DFLT_TIMEOUT_US           1000000
+#define ANFC_MAX_CHUNK_SIZE            SZ_1M
+#define ANFC_MAX_PARAM_SIZE            SZ_4K
+#define ANFC_MAX_STEPS                 SZ_2K
+#define ANFC_MAX_PKT_SIZE              (SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC              5U
+#define ANFC_RSVD_ECC_BYTES            21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK    100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK      80000000
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller point of view
+ * @buf: Data buffer (in or out depending on @read)
+ */
+struct anfc_op {
+       u32 pkt_reg;
+       u32 addr1_reg;
+       u32 addr2_reg;
+       u32 cmd_reg;
+       u32 prog_reg;
+       int steps;
+       unsigned int rdy_timeout_ms;
+       unsigned int len;
+       bool read;
+       u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node:              Used to store NAND chips into a list
+ * @chip:              NAND chip information structure
+ * @cs:                        Chip select line
+ * @rb:                        Ready-busy line (bit index polled in READY_STS_REG)
+ * @page_sz:           Register value of the page_sz field to use
+ * @clk:               Expected clock frequency to use
+ * @timings:           Data interface timing mode to use
+ * @ecc_conf:          Hardware ECC configuration value
+ * @strength:          Register value of the ECC strength
+ * @raddr_cycles:      Row address cycle information
+ * @caddr_cycles:      Column address cycle information
+ * @ecc_bits:          Exact number of ECC bits per syndrome
+ * @ecc_total:         Total number of ECC bytes
+ * @errloc:            Array of errors located with soft BCH
+ * @hw_ecc:            Buffer to store syndromes computed by hardware
+ * @bch:               BCH structure
+ */
+struct anand {
+       struct list_head node;
+       struct nand_chip chip;
+       unsigned int cs;
+       unsigned int rb;
+       unsigned int page_sz;
+       unsigned long clk;
+       u32 timings;
+       u32 ecc_conf;
+       u32 strength;
+       u16 raddr_cycles;
+       u16 caddr_cycles;
+       unsigned int ecc_bits;
+       unsigned int ecc_total;
+       unsigned int *errloc;
+       u8 *hw_ecc;
+       struct bch_control *bch;
+};
+
+/**
+ * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
+ * @dev:               Pointer to the device structure
+ * @base:              Remapped register area
+ * @controller_clk:    Pointer to the system clock
+ * @bus_clk:           Pointer to the flash clock
+ * @controller:                Base controller structure
+ * @chips:             List of all NAND chips attached to the controller
+ * @assigned_cs:       Bitmask describing already assigned CS lines
+ * @cur_clk:           Current clock rate
+ */
+struct arasan_nfc {
+       struct device *dev;
+       void __iomem *base;
+       struct clk *controller_clk;
+       struct clk *bus_clk;
+       struct nand_controller controller;
+       struct list_head chips;
+       unsigned long assigned_cs;
+       unsigned int cur_clk;
+};
+
+/* Retrieve the struct anand wrapping a given nand_chip */
+static struct anand *to_anand(struct nand_chip *nand)
+{
+       return container_of(nand, struct anand, chip);
+}
+
+/* Retrieve the driver instance embedding a given nand_controller */
+static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
+{
+       return container_of(ctrl, struct arasan_nfc, controller);
+}
+
+/*
+ * Poll the interrupt status register until @event is raised (no IRQ is used
+ * here), then write the event bit back (presumably write-1-to-clear — confirm
+ * against the Arasan datasheet). Returns 0 or -ETIMEDOUT after
+ * ANFC_DFLT_TIMEOUT_US.
+ */
+static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
+{
+       u32 val;
+       int ret;
+
+       ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
+                                        val & event, 0,
+                                        ANFC_DFLT_TIMEOUT_US);
+       if (ret) {
+               dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
+               return -ETIMEDOUT;
+       }
+
+       writel_relaxed(event, nfc->base + INTR_STS_REG);
+
+       return 0;
+}
+
+/*
+ * Wait for the chip's Ready/Busy bit (BIT(anand->rb) in READY_STS_REG) to
+ * rise, with a millisecond timeout. Returns 0 or -ETIMEDOUT.
+ */
+static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
+                           unsigned int timeout_ms)
+{
+       struct anand *anand = to_anand(chip);
+       u32 val;
+       int ret;
+
+       /* There is no R/B interrupt, we must poll a register */
+       ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
+                                        val & BIT(anand->rb),
+                                        1, timeout_ms * 1000);
+       if (ret) {
+               dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
+                       readl_relaxed(nfc->base + READY_STS_REG));
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Push a fully described operation into the controller registers. PROG_REG is
+ * written last; per the function name this write launches the operation.
+ */
+static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+       writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
+       writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
+       writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
+       writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
+       writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
+}
+
+/*
+ * Split a transfer of @len bytes into a power-of-two number of equal packets
+ * (*steps), each of *pktsize <= ANFC_MAX_PKT_SIZE bytes. Returns -ENOTSUPP
+ * when @len cannot be divided evenly that way. Either output pointer may be
+ * NULL if the caller does not need that value.
+ */
+static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+                              unsigned int *pktsize)
+{
+       unsigned int nb, sz;
+
+       for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
+               sz = len / nb;
+               if (sz <= ANFC_MAX_PKT_SIZE)
+                       break;
+       }
+
+       /* Reject lengths that do not split into nb equal packets */
+       if (sz * nb != len)
+               return -ENOTSUPP;
+
+       if (steps)
+               *steps = nb;
+
+       if (pktsize)
+               *pktsize = sz;
+
+       return 0;
+}
+
+/*
+ * When using the embedded hardware ECC engine, the controller is in charge of
+ * feeding the engine with, first, the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ *    but targeting the column of the first ECC bytes in the OOB area instead of
+ *    the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ *    the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ *    Register" to move the pointer back at the beginning of the main data.
+ * 3/ It will read the content of the main area for a given size (pktsize) and
+ *    will feed the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data and compares them
+ *    with the ones already received. It eventually triggers status flags and
+ *    then sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ *    register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and never
+ * reports uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
+/*
+ * Read a full page (plus OOB) via DMA, then decode each ECC step with the
+ * software BCH library (see the comment above about the unreliable hardware
+ * engine), correcting data bitflips in place and updating the MTD ECC stats.
+ * Returns the maximum number of bitflips found in a single ECC step, as the
+ * ->read_page() contract requires, or a negative error code.
+ */
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+                                int oob_required, int page)
+{
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct anand *anand = to_anand(chip);
+       unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+       unsigned int max_bitflips = 0;
+       dma_addr_t dma_addr;
+       int step, ret;
+       struct anfc_op nfc_op = {
+               .pkt_reg =
+                       PKT_SIZE(chip->ecc.size) |
+                       PKT_STEPS(chip->ecc.steps),
+               .addr1_reg =
+                       (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+                       (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+               .addr2_reg =
+                       ((page >> 16) & 0xFF) |
+                       ADDR2_STRENGTH(anand->strength) |
+                       ADDR2_CS(anand->cs),
+               .cmd_reg =
+                       CMD_1(NAND_CMD_READ0) |
+                       CMD_2(NAND_CMD_READSTART) |
+                       CMD_PAGE_SIZE(anand->page_sz) |
+                       CMD_DMA_ENABLE |
+                       CMD_NADDRS(anand->caddr_cycles +
+                                  anand->raddr_cycles),
+               .prog_reg = PROG_PGRD,
+       };
+
+       dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(nfc->dev, dma_addr)) {
+               dev_err(nfc->dev, "Buffer mapping error");
+               return -EIO;
+       }
+
+       writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+       writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+       anfc_trigger_op(nfc, &nfc_op);
+
+       ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+       dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+       if (ret) {
+               dev_err(nfc->dev, "Error reading page %d\n", page);
+               return ret;
+       }
+
+       /* Store the raw OOB bytes as well */
+       ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+                                        mtd->oobsize, 0);
+       if (ret)
+               return ret;
+
+       /*
+        * For each step, compute the BCH syndrome in software over the raw
+        * data and derive the real number of bitflips, since the hardware
+        * engine feedback cannot be trusted (see comment above).
+        */
+       for (step = 0; step < chip->ecc.steps; step++) {
+               u8 *raw_buf = &buf[step * chip->ecc.size];
+               unsigned int bit, byte;
+               int bf, i;
+
+               /* Extract the syndrome, it is not necessarily aligned */
+               memset(anand->hw_ecc, 0, chip->ecc.bytes);
+               nand_extract_bits(anand->hw_ecc, 0,
+                                 &chip->oob_poi[mtd->oobsize - anand->ecc_total],
+                                 anand->ecc_bits * step, anand->ecc_bits);
+
+               bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
+                               anand->hw_ecc, NULL, NULL, anand->errloc);
+               if (!bf) {
+                       continue;
+               } else if (bf > 0) {
+                       for (i = 0; i < bf; i++) {
+                               /* Only correct the data, not the syndrome */
+                               if (anand->errloc[i] < (chip->ecc.size * 8)) {
+                                       bit = BIT(anand->errloc[i] & 7);
+                                       byte = anand->errloc[i] >> 3;
+                                       raw_buf[byte] ^= bit;
+                               }
+                       }
+
+                       mtd->ecc_stats.corrected += bf;
+                       max_bitflips = max_t(unsigned int, max_bitflips, bf);
+
+                       continue;
+               }
+
+               /* bch_decode() failed: maybe the chunk is simply erased */
+               bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
+                                                NULL, 0, NULL, 0,
+                                                chip->ecc.strength);
+               if (bf > 0) {
+                       mtd->ecc_stats.corrected += bf;
+                       max_bitflips = max_t(unsigned int, max_bitflips, bf);
+                       memset(raw_buf, 0xFF, chip->ecc.size);
+               } else if (bf < 0) {
+                       mtd->ecc_stats.failed++;
+               }
+       }
+
+       /*
+        * ->read_page() must report the maximum number of bitflips seen in any
+        * single ECC step so the MTD core can return -EUCLEAN when the bitflip
+        * threshold is crossed; returning 0 here would hide corrected errors.
+        */
+       return max_bitflips;
+}
+
+/*
+ * Write a page with the hardware ECC engine enabled (CMD_ECC_ENABLE): the
+ * controller computes the ECC bytes itself, with their placement driven by
+ * the ECC_CONF/ECC_SP setup below. The spare area is not covered by the
+ * engine and is programmed separately when @oob_required.
+ */
+static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+                                 int oob_required, int page)
+{
+       struct anand *anand = to_anand(chip);
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+       dma_addr_t dma_addr;
+       int ret;
+       struct anfc_op nfc_op = {
+               .pkt_reg =
+                       PKT_SIZE(chip->ecc.size) |
+                       PKT_STEPS(chip->ecc.steps),
+               .addr1_reg =
+                       (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+                       (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+               .addr2_reg =
+                       ((page >> 16) & 0xFF) |
+                       ADDR2_STRENGTH(anand->strength) |
+                       ADDR2_CS(anand->cs),
+               .cmd_reg =
+                       CMD_1(NAND_CMD_SEQIN) |
+                       CMD_2(NAND_CMD_PAGEPROG) |
+                       CMD_PAGE_SIZE(anand->page_sz) |
+                       CMD_DMA_ENABLE |
+                       CMD_NADDRS(anand->caddr_cycles +
+                                  anand->raddr_cycles) |
+                       CMD_ECC_ENABLE,
+               .prog_reg = PROG_PGPROG,
+       };
+
+       writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
+       writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
+                      ECC_SP_ADDRS(anand->caddr_cycles),
+                      nfc->base + ECC_SP_REG);
+
+       dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(nfc->dev, dma_addr)) {
+               dev_err(nfc->dev, "Buffer mapping error");
+               return -EIO;
+       }
+
+       writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+       writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+       anfc_trigger_op(nfc, &nfc_op);
+       ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+       dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
+       if (ret) {
+               dev_err(nfc->dev, "Error writing page %d\n", page);
+               return ret;
+       }
+
+       /* Spare data is not protected */
+       if (oob_required)
+               ret = nand_write_oob_std(chip, page);
+
+       return ret;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+/*
+ * Translate a nand_subop into the register values (struct anfc_op) needed to
+ * execute it: the first CMD instruction fills CMD_1, the second CMD_2; up to
+ * ANFC_MAX_ADDR_CYC address cycles are packed into ADDR1/ADDR2; DATA phases
+ * configure the packet register (see the FIXME below about the 4-byte
+ * alignment of DATA cycles) and WAITRDY records the R/B timeout.
+ */
+static int anfc_parse_instructions(struct nand_chip *chip,
+                                  const struct nand_subop *subop,
+                                  struct anfc_op *nfc_op)
+{
+       struct anand *anand = to_anand(chip);
+       const struct nand_op_instr *instr = NULL;
+       bool first_cmd = true;
+       unsigned int op_id;
+       int ret, i;
+
+       memset(nfc_op, 0, sizeof(*nfc_op));
+       nfc_op->addr2_reg = ADDR2_CS(anand->cs);
+       nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
+
+       for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+               unsigned int offset, naddrs, pktsize;
+               const u8 *addrs;
+               u8 *buf;
+
+               instr = &subop->instrs[op_id];
+
+               switch (instr->type) {
+               case NAND_OP_CMD_INSTR:
+                       if (first_cmd)
+                               nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
+                       else
+                               nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
+
+                       first_cmd = false;
+                       break;
+
+               case NAND_OP_ADDR_INSTR:
+                       offset = nand_subop_get_addr_start_off(subop, op_id);
+                       naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+                       addrs = &instr->ctx.addr.addrs[offset];
+                       nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
+
+                       /* First 4 cycles go in ADDR1, the 5th in ADDR2; any
+                        * cycle beyond ANFC_MAX_ADDR_CYC is silently dropped.
+                        */
+                       for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
+                               if (i < 4)
+                                       nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
+                               else
+                                       nfc_op->addr2_reg |= addrs[i];
+                       }
+
+                       break;
+               case NAND_OP_DATA_IN_INSTR:
+                       nfc_op->read = true;
+                       fallthrough;
+               case NAND_OP_DATA_OUT_INSTR:
+                       offset = nand_subop_get_data_start_off(subop, op_id);
+                       buf = instr->ctx.data.buf.in;
+                       nfc_op->buf = &buf[offset];
+                       nfc_op->len = nand_subop_get_data_len(subop, op_id);
+                       ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
+                                                 &pktsize);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * Number of DATA cycles must be aligned on 4, this
+                        * means the controller might read/write more than
+                        * requested. This is harmless most of the time as extra
+                        * DATA are discarded in the write path and read pointer
+                        * adjusted in the read path.
+                        *
+                        * FIXME: The core should mark operations where
+                        * reading/writing more is allowed so the exec_op()
+                        * implementation can take the right decision when the
+                        * alignment constraint is not met: adjust the number of
+                        * DATA cycles when it's allowed, reject the operation
+                        * otherwise.
+                        */
+                       nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
+                                          PKT_STEPS(nfc_op->steps);
+                       break;
+               case NAND_OP_WAITRDY_INSTR:
+                       nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Move the data phase of an operation through the PIO data port, one packet
+ * (step) at a time: wait for the READ/WRITE ready event, then transfer the
+ * packet as 32-bit words. A trailing 1-3 byte remainder is handled with a
+ * single padded word access. Returns 0 or a negative error code.
+ */
+static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+       unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
+       unsigned int last_len = nfc_op->len % 4;
+       unsigned int offset, dir;
+       u8 *buf = nfc_op->buf;
+       int ret, i;
+
+       for (i = 0; i < nfc_op->steps; i++) {
+               dir = nfc_op->read ? READ_READY : WRITE_READY;
+               ret = anfc_wait_for_event(nfc, dir);
+               if (ret) {
+                       dev_err(nfc->dev, "PIO %s ready signal not received\n",
+                               nfc_op->read ? "Read" : "Write");
+                       return ret;
+               }
+
+               offset = i * (dwords * 4);
+               if (nfc_op->read)
+                       ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+                                    dwords);
+               else
+                       iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+                                     dwords);
+       }
+
+       if (last_len) {
+               /*
+                * Zero-initialize: on the write path memcpy() only fills
+                * last_len bytes, so without this the full-word write below
+                * would push uninitialized stack bytes to the data port.
+                */
+               u32 remainder = 0;
+
+               offset = nfc_op->len - last_len;
+
+               if (nfc_op->read) {
+                       remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
+                       memcpy(&buf[offset], &remainder, last_len);
+               } else {
+                       memcpy(&remainder, &buf[offset], last_len);
+                       writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
+               }
+       }
+
+       return anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_misc_data_type_exec(struct nand_chip *chip,
+                                   const struct nand_subop *subop,
+                                   u32 prog_reg)
+{
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct anfc_op nfc_op = {};
+       int ret;
+
+       ret = anfc_parse_instructions(chip, subop, &nfc_op);
+       if (ret)
+               return ret;
+
+       nfc_op.prog_reg = prog_reg;
+       anfc_trigger_op(nfc, &nfc_op);
+
+       if (nfc_op.rdy_timeout_ms) {
+               ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+               if (ret)
+                       return ret;
+       }
+
+       return anfc_rw_pio_op(nfc, &nfc_op);
+}
+
+static int anfc_param_read_type_exec(struct nand_chip *chip,
+                                    const struct nand_subop *subop)
+{
+       return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
+}
+
+static int anfc_data_read_type_exec(struct nand_chip *chip,
+                                   const struct nand_subop *subop)
+{
+       return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
+}
+
+static int anfc_param_write_type_exec(struct nand_chip *chip,
+                                     const struct nand_subop *subop)
+{
+       return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
+}
+
+static int anfc_data_write_type_exec(struct nand_chip *chip,
+                                    const struct nand_subop *subop)
+{
+       return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
+}
+
+static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
+                                      const struct nand_subop *subop,
+                                      u32 prog_reg)
+{
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct anfc_op nfc_op = {};
+       int ret;
+
+       ret = anfc_parse_instructions(chip, subop, &nfc_op);
+       if (ret)
+               return ret;
+
+       nfc_op.prog_reg = prog_reg;
+       anfc_trigger_op(nfc, &nfc_op);
+
+       ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+       if (ret)
+               return ret;
+
+       if (nfc_op.rdy_timeout_ms)
+               ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+
+       return ret;
+}
+
+static int anfc_status_type_exec(struct nand_chip *chip,
+                                const struct nand_subop *subop)
+{
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       u32 tmp;
+       int ret;
+
+       /* See anfc_check_op() for details about this constraint */
+       if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
+               return -ENOTSUPP;
+
+       ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
+       if (ret)
+               return ret;
+
+       tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
+       memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
+
+       return 0;
+}
+
+static int anfc_reset_type_exec(struct nand_chip *chip,
+                               const struct nand_subop *subop)
+{
+       return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
+}
+
+static int anfc_erase_type_exec(struct nand_chip *chip,
+                               const struct nand_subop *subop)
+{
+       return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
+}
+
+static int anfc_wait_type_exec(struct nand_chip *chip,
+                              const struct nand_subop *subop)
+{
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct anfc_op nfc_op = {};
+       int ret;
+
+       ret = anfc_parse_instructions(chip, subop, &nfc_op);
+       if (ret)
+               return ret;
+
+       return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+       NAND_OP_PARSER_PATTERN(
+               anfc_param_read_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+               NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_param_write_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+               NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_data_read_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+               NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_data_write_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+               NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+               NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_reset_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_erase_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_status_type_exec,
+               NAND_OP_PARSER_PAT_CMD_ELEM(false),
+               NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+       NAND_OP_PARSER_PATTERN(
+               anfc_wait_type_exec,
+               NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+       );
+
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+       struct anand *anand = to_anand(chip);
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       int ret;
+
+       /* Update the controller timings and the potential ECC configuration */
+       writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
+
+       /* Update clock frequency */
+       if (nfc->cur_clk != anand->clk) {
+               clk_disable_unprepare(nfc->controller_clk);
+               ret = clk_set_rate(nfc->controller_clk, anand->clk);
+               if (ret) {
+                       dev_err(nfc->dev, "Failed to change clock rate\n");
+                       return ret;
+               }
+
+               ret = clk_prepare_enable(nfc->controller_clk);
+               if (ret) {
+                       dev_err(nfc->dev,
+                               "Failed to re-enable the controller clock\n");
+                       return ret;
+               }
+
+               nfc->cur_clk = anand->clk;
+       }
+
+       return 0;
+}
+
+static int anfc_check_op(struct nand_chip *chip,
+                        const struct nand_operation *op)
+{
+       const struct nand_op_instr *instr;
+       int op_id;
+
+       /*
+        * The controller abstracts all the NAND operations and do not support
+        * data only operations.
+        *
+        * TODO: The nand_op_parser framework should be extended to
+        * support custom checks on DATA instructions.
+        */
+       for (op_id = 0; op_id < op->ninstrs; op_id++) {
+               instr = &op->instrs[op_id];
+
+               switch (instr->type) {
+               case NAND_OP_ADDR_INSTR:
+                       if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
+                               return -ENOTSUPP;
+
+                       break;
+               case NAND_OP_DATA_IN_INSTR:
+               case NAND_OP_DATA_OUT_INSTR:
+                       if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
+                               return -ENOTSUPP;
+
+                       if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
+                               return -ENOTSUPP;
+
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /*
+        * The controller does not allow to proceed with a CMD+DATA_IN cycle
+        * manually on the bus by reading data from the data register. Instead,
+        * the controller abstract a status read operation with its own status
+        * register after ordering a read status operation. Hence, we cannot
+        * support any CMD+DATA_IN operation other than a READ STATUS.
+        *
+        * TODO: The nand_op_parser() framework should be extended to describe
+        * fixed patterns instead of open-coding this check here.
+        */
+       if (op->ninstrs == 2 &&
+           op->instrs[0].type == NAND_OP_CMD_INSTR &&
+           op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
+           op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+               return -ENOTSUPP;
+
+       return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
+}
+
+static int anfc_exec_op(struct nand_chip *chip,
+                       const struct nand_operation *op,
+                       bool check_only)
+{
+       int ret;
+
+       if (check_only)
+               return anfc_check_op(chip, op);
+
+       ret = anfc_select_target(chip, op->cs);
+       if (ret)
+               return ret;
+
+       return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
+}
+
+static int anfc_setup_data_interface(struct nand_chip *chip, int target,
+                                    const struct nand_data_interface *conf)
+{
+       struct anand *anand = to_anand(chip);
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct device_node *np = nfc->dev->of_node;
+
+       if (target < 0)
+               return 0;
+
+       anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
+       anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+
+       /*
+        * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
+        * with f > 90MHz (default clock is 100MHz) but signals are unstable
+        * with higher modes. Hence we decrease a little bit the clock rate to
+        * 80MHz when using modes 2-5 with this SoC.
+        */
+       if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
+           conf->timings.mode >= 2)
+               anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
+
+       return 0;
+}
+
+static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
+{
+       unsigned int bch_gf_mag, ecc_bits;
+
+       switch (step_size) {
+       case SZ_512:
+               bch_gf_mag = 13;
+               break;
+       case SZ_1K:
+               bch_gf_mag = 14;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ecc_bits = bch_gf_mag * strength;
+
+       return DIV_ROUND_UP(ecc_bits, 8);
+}
+
+static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
+
+static const int anfc_hw_ecc_1024_strengths[] = {24};
+
+static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
+       {
+               .stepsize = SZ_512,
+               .strengths = anfc_hw_ecc_512_strengths,
+               .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
+       },
+       {
+               .stepsize = SZ_1K,
+               .strengths = anfc_hw_ecc_1024_strengths,
+               .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
+       },
+};
+
+static const struct nand_ecc_caps anfc_hw_ecc_caps = {
+       .stepinfos = anfc_hw_ecc_step_infos,
+       .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
+       .calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
+};
+
+static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+                                      struct nand_chip *chip)
+{
+       struct anand *anand = to_anand(chip);
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
+       int ret;
+
+       switch (mtd->writesize) {
+       case SZ_512:
+       case SZ_2K:
+       case SZ_4K:
+       case SZ_8K:
+       case SZ_16K:
+               break;
+       default:
+               dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
+               return -EINVAL;
+       }
+
+       ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
+       if (ret)
+               return ret;
+
+       switch (ecc->strength) {
+       case 12:
+               anand->strength = 0x1;
+               break;
+       case 8:
+               anand->strength = 0x2;
+               break;
+       case 4:
+               anand->strength = 0x3;
+               break;
+       case 24:
+               anand->strength = 0x4;
+               break;
+       default:
+               dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
+               return -EINVAL;
+       }
+
+       switch (ecc->size) {
+       case SZ_512:
+               bch_gf_mag = 13;
+               bch_prim_poly = 0x201b;
+               break;
+       case SZ_1K:
+               bch_gf_mag = 14;
+               bch_prim_poly = 0x4443;
+               break;
+       default:
+               dev_err(nfc->dev, "Unsupported step size %d\n", ecc->size);
+               return -EINVAL;
+       }
+
+       mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+
+       ecc->steps = mtd->writesize / ecc->size;
+       ecc->algo = NAND_ECC_BCH;
+       anand->ecc_bits = bch_gf_mag * ecc->strength;
+       ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
+       anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
+       ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
+       anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
+                         ECC_CONF_LEN(anand->ecc_total) |
+                         ECC_CONF_BCH_EN;
+
+       anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
+                                          sizeof(*anand->errloc), GFP_KERNEL);
+       if (!anand->errloc)
+               return -ENOMEM;
+
+       anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
+       if (!anand->hw_ecc)
+               return -ENOMEM;
+
+       /* Enforce bit swapping to fit the hardware */
+       anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
+       if (!anand->bch)
+               return -EINVAL;
+
+       ecc->read_page = anfc_read_page_hw_ecc;
+       ecc->write_page = anfc_write_page_hw_ecc;
+
+       return 0;
+}
+
+static int anfc_attach_chip(struct nand_chip *chip)
+{
+       struct anand *anand = to_anand(chip);
+       struct arasan_nfc *nfc = to_anfc(chip->controller);
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       int ret = 0;
+
+       if (mtd->writesize <= SZ_512)
+               anand->caddr_cycles = 1;
+       else
+               anand->caddr_cycles = 2;
+
+       if (chip->options & NAND_ROW_ADDR_3)
+               anand->raddr_cycles = 3;
+       else
+               anand->raddr_cycles = 2;
+
+       switch (mtd->writesize) {
+       case 512:
+               anand->page_sz = 0;
+               break;
+       case 1024:
+               anand->page_sz = 5;
+               break;
+       case 2048:
+               anand->page_sz = 1;
+               break;
+       case 4096:
+               anand->page_sz = 2;
+               break;
+       case 8192:
+               anand->page_sz = 3;
+               break;
+       case 16384:
+               anand->page_sz = 4;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* These hooks are valid for all ECC providers */
+       chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+       chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+       switch (chip->ecc.mode) {
+       case NAND_ECC_NONE:
+       case NAND_ECC_SOFT:
+       case NAND_ECC_ON_DIE:
+               break;
+       case NAND_ECC_HW:
+               ret = anfc_init_hw_ecc_controller(nfc, chip);
+               break;
+       default:
+               dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+                       chip->ecc.mode);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+static void anfc_detach_chip(struct nand_chip *chip)
+{
+       struct anand *anand = to_anand(chip);
+
+       if (anand->bch)
+               bch_free(anand->bch);
+}
+
+static const struct nand_controller_ops anfc_ops = {
+       .exec_op = anfc_exec_op,
+       .setup_data_interface = anfc_setup_data_interface,
+       .attach_chip = anfc_attach_chip,
+       .detach_chip = anfc_detach_chip,
+};
+
+static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
+{
+       struct anand *anand;
+       struct nand_chip *chip;
+       struct mtd_info *mtd;
+       u32 cs, rb; int ret;
+
+       anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
+       if (!anand)
+               return -ENOMEM;
+
+       /* We do not support multiple CS per chip yet */
+       if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
+               dev_err(nfc->dev, "Invalid reg property\n");
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32(np, "reg", &cs);
+       if (ret)
+               return ret;
+
+       ret = of_property_read_u32(np, "nand-rb", &rb);
+       if (ret)
+               return ret;
+
+       if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
+               dev_err(nfc->dev, "Wrong CS %u or RB %u\n", cs, rb);
+               return -EINVAL;
+       }
+
+       if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+               dev_err(nfc->dev, "Already assigned CS %u\n", cs);
+               return -EINVAL;
+       }
+
+       anand->cs = cs;
+       anand->rb = rb;
+
+       chip = &anand->chip;
+       mtd = nand_to_mtd(chip);
+       mtd->dev.parent = nfc->dev;
+       chip->controller = &nfc->controller;
+       chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+                       NAND_USES_DMA;
+
+       nand_set_flash_node(chip, np);
+       if (!mtd->name) {
+               dev_err(nfc->dev, "NAND label property is mandatory\n");
+               return -EINVAL;
+       }
+
+       ret = nand_scan(chip, 1);
+       if (ret) {
+               dev_err(nfc->dev, "Scan operation failed\n");
+               return ret;
+       }
+
+       ret = mtd_device_register(mtd, NULL, 0);
+       if (ret) {
+               nand_cleanup(chip);
+               return ret;
+       }
+
+       list_add_tail(&anand->node, &nfc->chips);
+
+       return 0;
+}
+
+static void anfc_chips_cleanup(struct arasan_nfc *nfc)
+{
+       struct anand *anand, *tmp;
+       struct nand_chip *chip;
+       int ret;
+
+       list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
+               chip = &anand->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               list_del(&anand->node);
+       }
+}
+
+static int anfc_chips_init(struct arasan_nfc *nfc)
+{
+       struct device_node *np = nfc->dev->of_node, *nand_np;
+       int nchips = of_get_child_count(np);
+       int ret;
+
+       if (!nchips || nchips > ANFC_MAX_CS) {
+               dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+                       nchips);
+               return -EINVAL;
+       }
+
+       for_each_child_of_node(np, nand_np) {
+               ret = anfc_chip_init(nfc, nand_np);
+               if (ret) {
+                       of_node_put(nand_np);
+                       anfc_chips_cleanup(nfc);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void anfc_reset(struct arasan_nfc *nfc)
+{
+       /* Disable interrupt signals */
+       writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
+
+       /* Enable interrupt status */
+       writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+       struct arasan_nfc *nfc;
+       int ret;
+
+       nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+       if (!nfc)
+               return -ENOMEM;
+
+       nfc->dev = &pdev->dev;
+       nand_controller_init(&nfc->controller);
+       nfc->controller.ops = &anfc_ops;
+       INIT_LIST_HEAD(&nfc->chips);
+
+       nfc->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(nfc->base))
+               return PTR_ERR(nfc->base);
+
+       anfc_reset(nfc);
+
+       nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+       if (IS_ERR(nfc->controller_clk))
+               return PTR_ERR(nfc->controller_clk);
+
+       nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+       if (IS_ERR(nfc->bus_clk))
+               return PTR_ERR(nfc->bus_clk);
+
+       ret = clk_prepare_enable(nfc->controller_clk);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(nfc->bus_clk);
+       if (ret)
+               goto disable_controller_clk;
+
+       ret = anfc_chips_init(nfc);
+       if (ret)
+               goto disable_bus_clk;
+
+       platform_set_drvdata(pdev, nfc);
+
+       return 0;
+
+disable_bus_clk:
+       clk_disable_unprepare(nfc->bus_clk);
+
+disable_controller_clk:
+       clk_disable_unprepare(nfc->controller_clk);
+
+       return ret;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+       struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+
+       anfc_chips_cleanup(nfc);
+
+       clk_disable_unprepare(nfc->bus_clk);
+       clk_disable_unprepare(nfc->controller_clk);
+
+       return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+       {
+               .compatible = "xlnx,zynqmp-nand-controller",
+       },
+       {
+               .compatible = "arasan,nfc-v3p10",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static struct platform_driver anfc_driver = {
+       .driver = {
+               .name = "arasan-nand-controller",
+               .of_match_table = anfc_ids,
+       },
+       .probe = anfc_probe,
+       .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
index 3ba17a9..46a3724 100644 (file)
@@ -1494,7 +1494,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
         * suitable for DMA.
         */
        if (nc->dmac)
-               chip->options |= NAND_USE_BOUNCE_BUFFER;
+               chip->options |= NAND_USES_DMA;
 
        /* Default to HW ECC if pmecc is available. */
        if (nc->pmecc)
index 75eb3e9..d865200 100644 (file)
 
 
 struct au1550nd_ctx {
+       struct nand_controller controller;
        struct nand_chip chip;
 
        int cs;
        void __iomem *base;
-       void (*write_byte)(struct nand_chip *, u_char);
 };
 
-/**
- * au_read_byte -  read one byte from the chip
- * @this:      NAND chip object
- *
- * read function for 8bit buswidth
- */
-static u_char au_read_byte(struct nand_chip *this)
-{
-       u_char ret = readb(this->legacy.IO_ADDR_R);
-       wmb(); /* drain writebuffer */
-       return ret;
-}
-
-/**
- * au_write_byte -  write one byte to the chip
- * @this:      NAND chip object
- * @byte:      pointer to data byte to write
- *
- * write function for 8it buswidth
- */
-static void au_write_byte(struct nand_chip *this, u_char byte)
-{
-       writeb(byte, this->legacy.IO_ADDR_W);
-       wmb(); /* drain writebuffer */
-}
-
-/**
- * au_read_byte16 -  read one byte endianness aware from the chip
- * @this:      NAND chip object
- *
- * read function for 16bit buswidth with endianness conversion
- */
-static u_char au_read_byte16(struct nand_chip *this)
-{
-       u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R));
-       wmb(); /* drain writebuffer */
-       return ret;
-}
-
-/**
- * au_write_byte16 -  write one byte endianness aware to the chip
- * @this:      NAND chip object
- * @byte:      pointer to data byte to write
- *
- * write function for 16bit buswidth with endianness conversion
- */
-static void au_write_byte16(struct nand_chip *this, u_char byte)
+static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
 {
-       writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W);
-       wmb(); /* drain writebuffer */
+       return container_of(this, struct au1550nd_ctx, chip);
 }
 
 /**
@@ -83,12 +36,15 @@ static void au_write_byte16(struct nand_chip *this, u_char byte)
  *
  * write function for 8bit buswidth
  */
-static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf(struct nand_chip *this, const void *buf,
+                        unsigned int len)
 {
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       const u8 *p = buf;
        int i;
 
        for (i = 0; i < len; i++) {
-               writeb(buf[i], this->legacy.IO_ADDR_W);
+               writeb(p[i], ctx->base + MEM_STNAND_DATA);
                wmb(); /* drain writebuffer */
        }
 }
@@ -101,12 +57,15 @@ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
  *
  * read function for 8bit buswidth
  */
-static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf(struct nand_chip *this, void *buf,
+                       unsigned int len)
 {
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       u8 *p = buf;
        int i;
 
        for (i = 0; i < len; i++) {
-               buf[i] = readb(this->legacy.IO_ADDR_R);
+               p[i] = readb(ctx->base + MEM_STNAND_DATA);
                wmb(); /* drain writebuffer */
        }
 }
@@ -119,17 +78,18 @@ static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
  *
  * write function for 16bit buswidth
  */
-static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf16(struct nand_chip *this, const void *buf,
+                          unsigned int len)
 {
-       int i;
-       u16 *p = (u16 *) buf;
-       len >>= 1;
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       const u16 *p = buf;
+       unsigned int i;
 
+       len >>= 1;
        for (i = 0; i < len; i++) {
-               writew(p[i], this->legacy.IO_ADDR_W);
+               writew(p[i], ctx->base + MEM_STNAND_DATA);
                wmb(); /* drain writebuffer */
        }
-
 }
 
 /**
@@ -140,239 +100,146 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
  *
  * read function for 16bit buswidth
  */
-static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
 {
-       int i;
-       u16 *p = (u16 *) buf;
-       len >>= 1;
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       unsigned int i;
+       u16 *p = buf;
 
+       len >>= 1;
        for (i = 0; i < len; i++) {
-               p[i] = readw(this->legacy.IO_ADDR_R);
+               p[i] = readw(ctx->base + MEM_STNAND_DATA);
                wmb(); /* drain writebuffer */
        }
 }
 
-/* Select the chip by setting nCE to low */
-#define NAND_CTL_SETNCE                1
-/* Deselect the chip by setting nCE to high */
-#define NAND_CTL_CLRNCE                2
-/* Select the command latch by setting CLE to high */
-#define NAND_CTL_SETCLE                3
-/* Deselect the command latch by setting CLE to low */
-#define NAND_CTL_CLRCLE                4
-/* Select the address latch by setting ALE to high */
-#define NAND_CTL_SETALE                5
-/* Deselect the address latch by setting ALE to low */
-#define NAND_CTL_CLRALE                6
-
-static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+static int find_nand_cs(unsigned long nand_base)
 {
-       struct nand_chip *this = mtd_to_nand(mtd);
-       struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-                                               chip);
+       void __iomem *base =
+                       (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+       unsigned long addr, staddr, start, mask, end;
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               addr = 0x1000 + (i * 0x10);                     /* CSx */
+               staddr = __raw_readl(base + addr + 0x08);       /* STADDRx */
+               /* figure out the decoded range of this CS */
+               start = (staddr << 4) & 0xfffc0000;
+               mask = (staddr << 18) & 0xfffc0000;
+               end = (start | (start - 1)) & ~(start ^ mask);
+               if ((nand_base >= start) && (nand_base < end))
+                       return i;
+       }
 
-       switch (cmd) {
+       return -ENODEV;
+}
 
-       case NAND_CTL_SETCLE:
-               this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
-               break;
+static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
+{
+       unsigned long timeout_jiffies = jiffies;
+
+       timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
+       do {
+               if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
+                       return 0;
 
-       case NAND_CTL_CLRCLE:
-               this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+               usleep_range(10, 100);
+       } while (time_before(jiffies, timeout_jiffies));
+
+       return -ETIMEDOUT;
+}
+
+static int au1550nd_exec_instr(struct nand_chip *this,
+                              const struct nand_op_instr *instr)
+{
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       unsigned int i;
+       int ret = 0;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               writeb(instr->ctx.cmd.opcode,
+                      ctx->base + MEM_STNAND_CMD);
+               /* Drain the writebuffer */
+               wmb();
                break;
 
-       case NAND_CTL_SETALE:
-               this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       writeb(instr->ctx.addr.addrs[i],
+                              ctx->base + MEM_STNAND_ADDR);
+                       /* Drain the writebuffer */
+                       wmb();
+               }
                break;
 
-       case NAND_CTL_CLRALE:
-               this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
-               /* FIXME: Nobody knows why this is necessary,
-                * but it works only that way */
-               udelay(1);
+       case NAND_OP_DATA_IN_INSTR:
+               if ((this->options & NAND_BUSWIDTH_16) &&
+                   !instr->ctx.data.force_8bit)
+                       au_read_buf16(this, instr->ctx.data.buf.in,
+                                     instr->ctx.data.len);
+               else
+                       au_read_buf(this, instr->ctx.data.buf.in,
+                                   instr->ctx.data.len);
                break;
 
-       case NAND_CTL_SETNCE:
-               /* assert (force assert) chip enable */
-               alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+       case NAND_OP_DATA_OUT_INSTR:
+               if ((this->options & NAND_BUSWIDTH_16) &&
+                   !instr->ctx.data.force_8bit)
+                       au_write_buf16(this, instr->ctx.data.buf.out,
+                                      instr->ctx.data.len);
+               else
+                       au_write_buf(this, instr->ctx.data.buf.out,
+                                    instr->ctx.data.len);
                break;
 
-       case NAND_CTL_CLRNCE:
-               /* deassert chip enable */
-               alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+       case NAND_OP_WAITRDY_INSTR:
+               ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
                break;
+       default:
+               return -EINVAL;
        }
 
-       this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W;
-
-       wmb(); /* Drain the writebuffer */
-}
-
-int au1550_device_ready(struct nand_chip *this)
-{
-       return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
-}
+       if (instr->delay_ns)
+               ndelay(instr->delay_ns);
 
-/**
- * au1550_select_chip - control -CE line
- *     Forbid driving -CE manually permitting the NAND controller to do this.
- *     Keeping -CE asserted during the whole sector reads interferes with the
- *     NOR flash and PCMCIA drivers as it causes contention on the static bus.
- *     We only have to hold -CE low for the NAND read commands since the flash
- *     chip needs it to be asserted during chip not ready time but the NAND
- *     controller keeps it released.
- *
- * @this:      NAND chip object
- * @chip:      chipnumber to select, -1 for deselect
- */
-static void au1550_select_chip(struct nand_chip *this, int chip)
-{
+       return ret;
 }
 
-/**
- * au1550_command - Send command to NAND device
- * @this:      NAND chip object
- * @command:   the command to be sent
- * @column:    the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- */
-static void au1550_command(struct nand_chip *this, unsigned command,
-                          int column, int page_addr)
+static int au1550nd_exec_op(struct nand_chip *this,
+                           const struct nand_operation *op,
+                           bool check_only)
 {
-       struct mtd_info *mtd = nand_to_mtd(this);
-       struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-                                               chip);
-       int ce_override = 0, i;
-       unsigned long flags = 0;
-
-       /* Begin command latch cycle */
-       au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
-       /*
-        * Write out the command to the device.
-        */
-       if (command == NAND_CMD_SEQIN) {
-               int readcmd;
-
-               if (column >= mtd->writesize) {
-                       /* OOB area */
-                       column -= mtd->writesize;
-                       readcmd = NAND_CMD_READOOB;
-               } else if (column < 256) {
-                       /* First 256 bytes --> READ0 */
-                       readcmd = NAND_CMD_READ0;
-               } else {
-                       column -= 256;
-                       readcmd = NAND_CMD_READ1;
-               }
-               ctx->write_byte(this, readcmd);
-       }
-       ctx->write_byte(this, command);
+       struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+       unsigned int i;
+       int ret;
 
-       /* Set ALE and clear CLE to start address cycle */
-       au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
+       if (check_only)
+               return 0;
 
-       if (column != -1 || page_addr != -1) {
-               au1550_hwcontrol(mtd, NAND_CTL_SETALE);
+       /* assert (force assert) chip enable */
+       alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+       /* Drain the writebuffer */
+       wmb();
 
-               /* Serially input address */
-               if (column != -1) {
-                       /* Adjust columns for 16 bit buswidth */
-                       if (this->options & NAND_BUSWIDTH_16 &&
-                                       !nand_opcode_8bits(command))
-                               column >>= 1;
-                       ctx->write_byte(this, column);
-               }
-               if (page_addr != -1) {
-                       ctx->write_byte(this, (u8)(page_addr & 0xff));
-
-                       if (command == NAND_CMD_READ0 ||
-                           command == NAND_CMD_READ1 ||
-                           command == NAND_CMD_READOOB) {
-                               /*
-                                * NAND controller will release -CE after
-                                * the last address byte is written, so we'll
-                                * have to forcibly assert it. No interrupts
-                                * are allowed while we do this as we don't
-                                * want the NOR flash or PCMCIA drivers to
-                                * steal our precious bytes of data...
-                                */
-                               ce_override = 1;
-                               local_irq_save(flags);
-                               au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
-                       }
-
-                       ctx->write_byte(this, (u8)(page_addr >> 8));
-
-                       if (this->options & NAND_ROW_ADDR_3)
-                               ctx->write_byte(this,
-                                               ((page_addr >> 16) & 0x0f));
-               }
-               /* Latch in address */
-               au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
-       }
-
-       /*
-        * Program and erase have their own busy handlers.
-        * Status and sequential in need no delay.
-        */
-       switch (command) {
-
-       case NAND_CMD_PAGEPROG:
-       case NAND_CMD_ERASE1:
-       case NAND_CMD_ERASE2:
-       case NAND_CMD_SEQIN:
-       case NAND_CMD_STATUS:
-               return;
-
-       case NAND_CMD_RESET:
-               break;
-
-       case NAND_CMD_READ0:
-       case NAND_CMD_READ1:
-       case NAND_CMD_READOOB:
-               /* Check if we're really driving -CE low (just in case) */
-               if (unlikely(!ce_override))
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = au1550nd_exec_instr(this, &op->instrs[i]);
+               if (ret)
                        break;
-
-               /* Apply a short delay always to ensure that we do wait tWB. */
-               ndelay(100);
-               /* Wait for a chip to become ready... */
-               for (i = this->legacy.chip_delay;
-                    !this->legacy.dev_ready(this) && i > 0; --i)
-                       udelay(1);
-
-               /* Release -CE and re-enable interrupts. */
-               au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
-               local_irq_restore(flags);
-               return;
        }
-       /* Apply this short delay always to ensure that we do wait tWB. */
-       ndelay(100);
-
-       while(!this->legacy.dev_ready(this));
-}
 
-static int find_nand_cs(unsigned long nand_base)
-{
-       void __iomem *base =
-                       (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
-       unsigned long addr, staddr, start, mask, end;
-       int i;
+       /* deassert chip enable */
+       alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+       /* Drain the writebuffer */
+       wmb();
 
-       for (i = 0; i < 4; i++) {
-               addr = 0x1000 + (i * 0x10);                     /* CSx */
-               staddr = __raw_readl(base + addr + 0x08);       /* STADDRx */
-               /* figure out the decoded range of this CS */
-               start = (staddr << 4) & 0xfffc0000;
-               mask = (staddr << 18) & 0xfffc0000;
-               end = (start | (start - 1)) & ~(start ^ mask);
-               if ((nand_base >= start) && (nand_base < end))
-                       return i;
-       }
-
-       return -ENODEV;
+       return ret;
 }
 
+static const struct nand_controller_ops au1550nd_ops = {
+       .exec_op = au1550nd_exec_op,
+};
+
 static int au1550nd_probe(struct platform_device *pdev)
 {
        struct au1550nd_platdata *pd;
@@ -424,23 +291,15 @@ static int au1550nd_probe(struct platform_device *pdev)
        }
        ctx->cs = cs;
 
-       this->legacy.dev_ready = au1550_device_ready;
-       this->legacy.select_chip = au1550_select_chip;
-       this->legacy.cmdfunc = au1550_command;
-
-       /* 30 us command delay time */
-       this->legacy.chip_delay = 30;
+       nand_controller_init(&ctx->controller);
+       ctx->controller.ops = &au1550nd_ops;
+       this->controller = &ctx->controller;
        this->ecc.mode = NAND_ECC_SOFT;
        this->ecc.algo = NAND_ECC_HAMMING;
 
        if (pd->devwidth)
                this->options |= NAND_BUSWIDTH_16;
 
-       this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
-       ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
-       this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
-       this->legacy.read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
-
        ret = nand_scan(this, 1);
        if (ret) {
                dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
@@ -466,8 +325,12 @@ static int au1550nd_remove(struct platform_device *pdev)
 {
        struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
        struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       struct nand_chip *chip = &ctx->chip;
+       int ret;
 
-       nand_release(&ctx->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        iounmap(ctx->base);
        release_mem_region(r->start, 0x1000);
        kfree(ctx);
index 8dae97c..dcc70d9 100644 (file)
@@ -60,8 +60,12 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
 static int bcm47xxnflash_remove(struct platform_device *pdev)
 {
        struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &nflash->nand_chip;
+       int ret;
 
-       nand_release(&nflash->nand_chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        return 0;
 }
index 8f9ffb4..44068e9 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -264,6 +263,7 @@ struct brcmnand_controller {
        const unsigned int      *block_sizes;
        unsigned int            max_page_size;
        const unsigned int      *page_sizes;
+       unsigned int            page_size_shift;
        unsigned int            max_oob;
        u32                     features;
 
@@ -338,8 +338,38 @@ enum brcmnand_reg {
        BRCMNAND_FC_BASE,
 };
 
-/* BRCMNAND v4.0 */
-static const u16 brcmnand_regs_v40[] = {
+/* BRCMNAND v2.1-v2.2 */
+static const u16 brcmnand_regs_v21[] = {
+       [BRCMNAND_CMD_START]            =  0x04,
+       [BRCMNAND_CMD_EXT_ADDRESS]      =  0x08,
+       [BRCMNAND_CMD_ADDRESS]          =  0x0c,
+       [BRCMNAND_INTFC_STATUS]         =  0x5c,
+       [BRCMNAND_CS_SELECT]            =  0x14,
+       [BRCMNAND_CS_XOR]               =  0x18,
+       [BRCMNAND_LL_OP]                =     0,
+       [BRCMNAND_CS0_BASE]             =  0x40,
+       [BRCMNAND_CS1_BASE]             =     0,
+       [BRCMNAND_CORR_THRESHOLD]       =     0,
+       [BRCMNAND_CORR_THRESHOLD_EXT]   =     0,
+       [BRCMNAND_UNCORR_COUNT]         =     0,
+       [BRCMNAND_CORR_COUNT]           =     0,
+       [BRCMNAND_CORR_EXT_ADDR]        =  0x60,
+       [BRCMNAND_CORR_ADDR]            =  0x64,
+       [BRCMNAND_UNCORR_EXT_ADDR]      =  0x68,
+       [BRCMNAND_UNCORR_ADDR]          =  0x6c,
+       [BRCMNAND_SEMAPHORE]            =  0x50,
+       [BRCMNAND_ID]                   =  0x54,
+       [BRCMNAND_ID_EXT]               =     0,
+       [BRCMNAND_LL_RDATA]             =     0,
+       [BRCMNAND_OOB_READ_BASE]        =  0x20,
+       [BRCMNAND_OOB_READ_10_BASE]     =     0,
+       [BRCMNAND_OOB_WRITE_BASE]       =  0x30,
+       [BRCMNAND_OOB_WRITE_10_BASE]    =     0,
+       [BRCMNAND_FC_BASE]              = 0x200,
+};
+
+/* BRCMNAND v3.3-v4.0 */
+static const u16 brcmnand_regs_v33[] = {
        [BRCMNAND_CMD_START]            =  0x04,
        [BRCMNAND_CMD_EXT_ADDRESS]      =  0x08,
        [BRCMNAND_CMD_ADDRESS]          =  0x0c,
@@ -536,6 +566,9 @@ enum {
        CFG_BUS_WIDTH                   = BIT(CFG_BUS_WIDTH_SHIFT),
        CFG_DEVICE_SIZE_SHIFT           = 24,
 
+       /* Only for v2.1 */
+       CFG_PAGE_SIZE_SHIFT_v2_1        = 30,
+
        /* Only for pre-v7.1 (with no CFG_EXT register) */
        CFG_PAGE_SIZE_SHIFT             = 20,
        CFG_BLK_SIZE_SHIFT              = 28,
@@ -571,12 +604,16 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 {
        static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
        static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
-       static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+       static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
+       static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
+       static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
+       static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
+       static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
 
        ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
 
-       /* Only support v4.0+? */
-       if (ctrl->nand_version < 0x0400) {
+       /* Only support v2.1+ */
+       if (ctrl->nand_version < 0x0201) {
                dev_err(ctrl->dev, "version %#x not supported\n",
                        ctrl->nand_version);
                return -ENODEV;
@@ -591,8 +628,10 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
                ctrl->reg_offsets = brcmnand_regs_v60;
        else if (ctrl->nand_version >= 0x0500)
                ctrl->reg_offsets = brcmnand_regs_v50;
-       else if (ctrl->nand_version >= 0x0400)
-               ctrl->reg_offsets = brcmnand_regs_v40;
+       else if (ctrl->nand_version >= 0x0303)
+               ctrl->reg_offsets = brcmnand_regs_v33;
+       else if (ctrl->nand_version >= 0x0201)
+               ctrl->reg_offsets = brcmnand_regs_v21;
 
        /* Chip-select stride */
        if (ctrl->nand_version >= 0x0701)
@@ -606,8 +645,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        } else {
                ctrl->cs_offsets = brcmnand_cs_offsets;
 
-               /* v5.0 and earlier has a different CS0 offset layout */
-               if (ctrl->nand_version <= 0x0500)
+               /* v3.3-5.0 have a different CS0 offset layout */
+               if (ctrl->nand_version >= 0x0303 &&
+                   ctrl->nand_version <= 0x0500)
                        ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
        }
 
@@ -617,14 +657,32 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
                ctrl->max_page_size = 16 * 1024;
                ctrl->max_block_size = 2 * 1024 * 1024;
        } else {
-               ctrl->page_sizes = page_sizes;
+               if (ctrl->nand_version >= 0x0304)
+                       ctrl->page_sizes = page_sizes_v3_4;
+               else if (ctrl->nand_version >= 0x0202)
+                       ctrl->page_sizes = page_sizes_v2_2;
+               else
+                       ctrl->page_sizes = page_sizes_v2_1;
+
+               if (ctrl->nand_version >= 0x0202)
+                       ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
+               else
+                       ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
+
                if (ctrl->nand_version >= 0x0600)
                        ctrl->block_sizes = block_sizes_v6;
-               else
+               else if (ctrl->nand_version >= 0x0400)
                        ctrl->block_sizes = block_sizes_v4;
+               else if (ctrl->nand_version >= 0x0202)
+                       ctrl->block_sizes = block_sizes_v2_2;
+               else
+                       ctrl->block_sizes = block_sizes_v2_1;
 
                if (ctrl->nand_version < 0x0400) {
-                       ctrl->max_page_size = 4096;
+                       if (ctrl->nand_version < 0x0202)
+                               ctrl->max_page_size = 2048;
+                       else
+                               ctrl->max_page_size = 4096;
                        ctrl->max_block_size = 512 * 1024;
                }
        }
@@ -810,6 +868,9 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
        enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
        int cs = host->cs;
 
+       if (!ctrl->reg_offsets[reg])
+               return;
+
        if (ctrl->nand_version == 0x0702)
                bits = 7;
        else if (ctrl->nand_version >= 0x0600)
@@ -868,8 +929,10 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
                return GENMASK(7, 0);
        else if (ctrl->nand_version >= 0x0600)
                return GENMASK(6, 0);
-       else
+       else if (ctrl->nand_version >= 0x0303)
                return GENMASK(5, 0);
+       else
+               return GENMASK(4, 0);
 }
 
 #define NAND_ACC_CONTROL_ECC_SHIFT     16
@@ -1100,30 +1163,30 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
        struct brcmnand_cfg *cfg = &host->hwcfg;
        int sas = cfg->spare_area_size << cfg->sector_size_1k;
        int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+       u32 next;
 
-       if (section >= sectors * 2)
+       if (section > sectors)
                return -ERANGE;
 
-       oobregion->offset = (section / 2) * sas;
+       next = (section * sas);
+       if (section < sectors)
+               next += 6;
 
-       if (section & 1) {
-               oobregion->offset += 9;
-               oobregion->length = 7;
+       if (section) {
+               oobregion->offset = ((section - 1) * sas) + 9;
        } else {
-               oobregion->length = 6;
-
-               /* First sector of each page may have BBI */
-               if (!section) {
-                       /*
-                        * Small-page NAND use byte 6 for BBI while large-page
-                        * NAND use byte 0.
-                        */
-                       if (cfg->page_size > 512)
-                               oobregion->offset++;
-                       oobregion->length--;
+               if (cfg->page_size > 512) {
+                       /* Large page NAND uses first 2 bytes for BBI */
+                       oobregion->offset = 2;
+               } else {
+                       /* Small page NAND uses last byte before ECC for BBI */
+                       oobregion->offset = 0;
+                       next--;
                }
        }
 
+       oobregion->length = next - oobregion->offset;
+
        return 0;
 }
 
@@ -2018,28 +2081,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
 static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
                  struct nand_chip *chip, void *buf, u64 addr)
 {
-       int i, sas;
-       void *oob = chip->oob_poi;
+       struct mtd_oob_region ecc;
+       int i;
        int bitflips = 0;
        int page = addr >> chip->page_shift;
        int ret;
+       void *ecc_bytes;
        void *ecc_chunk;
 
        if (!buf)
                buf = nand_get_data_buf(chip);
 
-       sas = mtd->oobsize / chip->ecc.steps;
-
        /* read without ecc for verification */
        ret = chip->ecc.read_page_raw(chip, buf, true, page);
        if (ret)
                return ret;
 
-       for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+       for (i = 0; i < chip->ecc.steps; i++) {
                ecc_chunk = buf + chip->ecc.size * i;
-               ret = nand_check_erased_ecc_chunk(ecc_chunk,
-                                                 chip->ecc.size,
-                                                 oob, sas, NULL, 0,
+
+               mtd_ooblayout_ecc(mtd, i, &ecc);
+               ecc_bytes = chip->oob_poi + ecc.offset;
+
+               ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
+                                                 ecc_bytes, ecc.length,
+                                                 NULL, 0,
                                                  chip->ecc.strength);
                if (ret < 0)
                        return ret;
@@ -2377,7 +2443,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
                (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
                (device_size << CFG_DEVICE_SIZE_SHIFT);
        if (cfg_offs == cfg_ext_offs) {
-               tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+               tmp |= (page_size << ctrl->page_size_shift) |
                       (block_size << CFG_BLK_SIZE_SHIFT);
                nand_writereg(ctrl, cfg_offs, tmp);
        } else {
@@ -2389,9 +2455,11 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
 
        tmp = nand_readreg(ctrl, acc_control_offs);
        tmp &= ~brcmnand_ecc_level_mask(ctrl);
-       tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
        tmp &= ~brcmnand_spare_area_mask(ctrl);
-       tmp |= cfg->spare_area_size;
+       if (ctrl->nand_version >= 0x0302) {
+               tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+               tmp |= cfg->spare_area_size;
+       }
        nand_writereg(ctrl, acc_control_offs, tmp);
 
        brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
@@ -2577,7 +2645,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
         * to/from, and have nand_base pass us a bounce buffer instead, as
         * needed.
         */
-       chip->options |= NAND_USE_BOUNCE_BUFFER;
+       chip->options |= NAND_USES_DMA;
 
        if (chip->bbt_options & NAND_BBT_USE_FLASH)
                chip->bbt_options |= NAND_BBT_NO_OOB;
@@ -2764,6 +2832,8 @@ const struct dev_pm_ops brcmnand_pm_ops = {
 EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
 
 static const struct of_device_id brcmnand_of_match[] = {
+       { .compatible = "brcm,brcmnand-v2.1" },
+       { .compatible = "brcm,brcmnand-v2.2" },
        { .compatible = "brcm,brcmnand-v4.0" },
        { .compatible = "brcm,brcmnand-v5.0" },
        { .compatible = "brcm,brcmnand-v6.0" },
@@ -3045,9 +3115,15 @@ int brcmnand_remove(struct platform_device *pdev)
 {
        struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
        struct brcmnand_host *host;
+       struct nand_chip *chip;
+       int ret;
 
-       list_for_each_entry(host, &ctrl->host_list, node)
-               nand_release(&host->chip);
+       list_for_each_entry(host, &ctrl->host_list, node) {
+               chip = &host->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+       }
 
        clk_disable_unprepare(ctrl->clk);
 
index efddc5c..c405722 100644 (file)
@@ -2223,10 +2223,12 @@ static int cadence_nand_exec_op(struct nand_chip *chip,
                                const struct nand_operation *op,
                                bool check_only)
 {
-       int status = cadence_nand_select_target(chip);
+       if (!check_only) {
+               int status = cadence_nand_select_target(chip);
 
-       if (status)
-               return status;
+               if (status)
+                       return status;
+       }
 
        return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
                                      check_only);
@@ -2592,7 +2594,7 @@ cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
        return 0;
 }
 
-int cadence_nand_attach_chip(struct nand_chip *chip)
+static int cadence_nand_attach_chip(struct nand_chip *chip)
 {
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
@@ -2778,9 +2780,14 @@ static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
 static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
 {
        struct cdns_nand_chip *entry, *temp;
+       struct nand_chip *chip;
+       int ret;
 
        list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
-               nand_release(&entry->chip);
+               chip = &entry->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
                list_del(&entry->node);
        }
 }
index 2d1c22d..9217379 100644 (file)
@@ -546,11 +546,6 @@ static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
        return nand_prog_page_end_op(chip);
 }
 
-static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs)
-{
-       return 0;
-}
-
 /* F_2[X]/(X**6+X+1)  */
 static unsigned short gf64_mul(u8 a, u8 b)
 {
@@ -718,10 +713,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
        /* Enable the following for a flash based bad block table */
        cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
 
-       if (skipbbt) {
-               cafe->nand.options |= NAND_SKIP_BBTSCAN;
-               cafe->nand.legacy.block_bad = cafe_nand_block_bad;
-       }
+       if (skipbbt)
+               cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
 
        if (numtimings && numtimings != 3) {
                dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
@@ -814,11 +807,14 @@ static void cafe_nand_remove(struct pci_dev *pdev)
        struct mtd_info *mtd = pci_get_drvdata(pdev);
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct cafe_priv *cafe = nand_get_controller_data(chip);
+       int ret;
 
        /* Disable NAND IRQ in global IRQ mask register */
        cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
        free_irq(pdev->irq, mtd);
-       nand_release(chip);
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(chip);
        free_rs(cafe->rs);
        pci_iounmap(pdev, cafe->mmio);
        dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
deleted file mode 100644 (file)
index 045b617..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  Copyright (C) 2006 Compulab, Ltd.
- *  Mike Rapoport <mike@compulab.co.il>
- *
- *  Derived from drivers/mtd/nand/h1910.c (removed in v3.10)
- *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   CM-X270 board.
- */
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include <mach/pxa2xx-regs.h>
-
-#define GPIO_NAND_CS   (11)
-#define GPIO_NAND_RB   (89)
-
-/* MTD structure for CM-X270 board */
-static struct mtd_info *cmx270_nand_mtd;
-
-/* remaped IO address of the device */
-static void __iomem *cmx270_nand_io;
-
-/*
- * Define static partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
-       [0] = {
-               .name   = "cmx270-0",
-               .offset = 0,
-               .size   = MTDPART_SIZ_FULL
-       }
-};
-#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-
-static u_char cmx270_read_byte(struct nand_chip *this)
-{
-       return (readl(this->legacy.IO_ADDR_R) >> 16);
-}
-
-static void cmx270_write_buf(struct nand_chip *this, const u_char *buf,
-                            int len)
-{
-       int i;
-
-       for (i=0; i<len; i++)
-               writel((*buf++ << 16), this->legacy.IO_ADDR_W);
-}
-
-static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
-       int i;
-
-       for (i=0; i<len; i++)
-               *buf++ = readl(this->legacy.IO_ADDR_R) >> 16;
-}
-
-static inline void nand_cs_on(void)
-{
-       gpio_set_value(GPIO_NAND_CS, 0);
-}
-
-static void nand_cs_off(void)
-{
-       dsb();
-
-       gpio_set_value(GPIO_NAND_CS, 1);
-}
-
-/*
- *     hardware specific access to control-lines
- */
-static void cmx270_hwcontrol(struct nand_chip *this, int dat,
-                            unsigned int ctrl)
-{
-       unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W;
-
-       dsb();
-
-       if (ctrl & NAND_CTRL_CHANGE) {
-               if ( ctrl & NAND_ALE )
-                       nandaddr |=  (1 << 3);
-               else
-                       nandaddr &= ~(1 << 3);
-               if ( ctrl & NAND_CLE )
-                       nandaddr |=  (1 << 2);
-               else
-                       nandaddr &= ~(1 << 2);
-               if ( ctrl & NAND_NCE )
-                       nand_cs_on();
-               else
-                       nand_cs_off();
-       }
-
-       dsb();
-       this->legacy.IO_ADDR_W = (void __iomem*)nandaddr;
-       if (dat != NAND_CMD_NONE)
-               writel((dat << 16), this->legacy.IO_ADDR_W);
-
-       dsb();
-}
-
-/*
- *     read device ready pin
- */
-static int cmx270_device_ready(struct nand_chip *this)
-{
-       dsb();
-
-       return (gpio_get_value(GPIO_NAND_RB));
-}
-
-/*
- * Main initialization routine
- */
-static int __init cmx270_init(void)
-{
-       struct nand_chip *this;
-       int ret;
-
-       if (!(machine_is_armcore() && cpu_is_pxa27x()))
-               return -ENODEV;
-
-       ret = gpio_request(GPIO_NAND_CS, "NAND CS");
-       if (ret) {
-               pr_warn("CM-X270: failed to request NAND CS gpio\n");
-               return ret;
-       }
-
-       gpio_direction_output(GPIO_NAND_CS, 1);
-
-       ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
-       if (ret) {
-               pr_warn("CM-X270: failed to request NAND R/B gpio\n");
-               goto err_gpio_request;
-       }
-
-       gpio_direction_input(GPIO_NAND_RB);
-
-       /* Allocate memory for MTD device structure and private data */
-       this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-       if (!this) {
-               ret = -ENOMEM;
-               goto err_kzalloc;
-       }
-
-       cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
-       if (!cmx270_nand_io) {
-               pr_debug("Unable to ioremap NAND device\n");
-               ret = -EINVAL;
-               goto err_ioremap;
-       }
-
-       cmx270_nand_mtd = nand_to_mtd(this);
-
-       /* Link the private data with the MTD structure */
-       cmx270_nand_mtd->owner = THIS_MODULE;
-
-       /* insert callbacks */
-       this->legacy.IO_ADDR_R = cmx270_nand_io;
-       this->legacy.IO_ADDR_W = cmx270_nand_io;
-       this->legacy.cmd_ctrl = cmx270_hwcontrol;
-       this->legacy.dev_ready = cmx270_device_ready;
-
-       /* 15 us command delay time */
-       this->legacy.chip_delay = 20;
-       this->ecc.mode = NAND_ECC_SOFT;
-       this->ecc.algo = NAND_ECC_HAMMING;
-
-       /* read/write functions */
-       this->legacy.read_byte = cmx270_read_byte;
-       this->legacy.read_buf = cmx270_read_buf;
-       this->legacy.write_buf = cmx270_write_buf;
-
-       /* Scan to find existence of the device */
-       ret = nand_scan(this, 1);
-       if (ret) {
-               pr_notice("No NAND device\n");
-               goto err_scan;
-       }
-
-       /* Register the partitions */
-       ret = mtd_device_register(cmx270_nand_mtd, partition_info,
-                                 NUM_PARTITIONS);
-       if (ret)
-               goto err_scan;
-
-       /* Return happy */
-       return 0;
-
-err_scan:
-       iounmap(cmx270_nand_io);
-err_ioremap:
-       kfree(this);
-err_kzalloc:
-       gpio_free(GPIO_NAND_RB);
-err_gpio_request:
-       gpio_free(GPIO_NAND_CS);
-
-       return ret;
-
-}
-module_init(cmx270_init);
-
-/*
- * Clean up routine
- */
-static void __exit cmx270_cleanup(void)
-{
-       /* Release resources, unregister device */
-       nand_release(mtd_to_nand(cmx270_nand_mtd));
-
-       gpio_free(GPIO_NAND_RB);
-       gpio_free(GPIO_NAND_CS);
-
-       iounmap(cmx270_nand_io);
-
-       kfree(mtd_to_nand(cmx270_nand_mtd));
-}
-module_exit(cmx270_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
index e2322ce..9472bf7 100644 (file)
@@ -21,9 +21,9 @@
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
+#include <linux/iopoll.h>
 
 #include <asm/msr.h>
-#include <asm/io.h>
 
 #define NR_CS553X_CONTROLLERS  4
 
 #define CS_NAND_ECC_CLRECC     (1<<1)
 #define CS_NAND_ECC_ENECC      (1<<0)
 
-static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len)
+struct cs553x_nand_controller {
+       struct nand_controller base;
+       struct nand_chip chip;
+       void __iomem *mmio;
+};
+
+static struct cs553x_nand_controller *
+to_cs553x(struct nand_controller *controller)
+{
+       return container_of(controller, struct cs553x_nand_controller, base);
+}
+
+static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
+                                 u32 ctl, u8 data)
 {
+       u8 status;
+       int ret;
+
+       writeb(ctl, cs553x->mmio + MM_NAND_CTL);
+       writeb(data, cs553x->mmio + MM_NAND_IO);
+       ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
+                                       !(status & CS_NAND_CTLR_BUSY), 1,
+                                       100000);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
+                          unsigned int len)
+{
+       writeb(0, cs553x->mmio + MM_NAND_CTL);
        while (unlikely(len > 0x800)) {
-               memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800);
+               memcpy_fromio(buf, cs553x->mmio, 0x800);
                buf += 0x800;
                len -= 0x800;
        }
-       memcpy_fromio(buf, this->legacy.IO_ADDR_R, len);
+       memcpy_fromio(buf, cs553x->mmio, len);
 }
 
-static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
+                           const void *buf, unsigned int len)
 {
+       writeb(0, cs553x->mmio + MM_NAND_CTL);
        while (unlikely(len > 0x800)) {
-               memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800);
+               memcpy_toio(cs553x->mmio, buf, 0x800);
                buf += 0x800;
                len -= 0x800;
        }
-       memcpy_toio(this->legacy.IO_ADDR_R, buf, len);
+       memcpy_toio(cs553x->mmio, buf, len);
 }
 
-static unsigned char cs553x_read_byte(struct nand_chip *this)
+static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
+                            unsigned int timeout_ms)
 {
-       return readb(this->legacy.IO_ADDR_R);
+       u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
+       u8 status;
+
+       return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
+                                 (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
+                                 timeout_ms * 1000);
 }
 
-static void cs553x_write_byte(struct nand_chip *this, u_char byte)
+static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
+                            const struct nand_op_instr *instr)
 {
-       int i = 100000;
+       unsigned int i;
+       int ret = 0;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
+                                            instr->ctx.cmd.opcode);
+               break;
+
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
+                                                    instr->ctx.addr.addrs[i]);
+                       if (ret)
+                               break;
+               }
+               break;
+
+       case NAND_OP_DATA_IN_INSTR:
+               cs553x_data_in(cs553x, instr->ctx.data.buf.in,
+                              instr->ctx.data.len);
+               break;
+
+       case NAND_OP_DATA_OUT_INSTR:
+               cs553x_data_out(cs553x, instr->ctx.data.buf.out,
+                               instr->ctx.data.len);
+               break;
 
-       while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
-               udelay(1);
-               i--;
+       case NAND_OP_WAITRDY_INSTR:
+               ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
+               break;
        }
-       writeb(byte, this->legacy.IO_ADDR_W + 0x801);
+
+       if (instr->delay_ns)
+               ndelay(instr->delay_ns);
+
+       return ret;
 }
 
-static void cs553x_hwcontrol(struct nand_chip *this, int cmd,
-                            unsigned int ctrl)
+static int cs553x_exec_op(struct nand_chip *this,
+                         const struct nand_operation *op,
+                         bool check_only)
 {
-       void __iomem *mmio_base = this->legacy.IO_ADDR_R;
-       if (ctrl & NAND_CTRL_CHANGE) {
-               unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
-               writeb(ctl, mmio_base + MM_NAND_CTL);
+       struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+       unsigned int i;
+       int ret;
+
+       if (check_only)
+               return true;
+
+       /* De-assert the CE pin */
+       writeb(0, cs553x->mmio + MM_NAND_CTL);
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
+               if (ret)
+                       break;
        }
-       if (cmd != NAND_CMD_NONE)
-               cs553x_write_byte(this, cmd);
-}
 
-static int cs553x_device_ready(struct nand_chip *this)
-{
-       void __iomem *mmio_base = this->legacy.IO_ADDR_R;
-       unsigned char foo = readb(mmio_base + MM_NAND_STS);
+       /* Re-assert the CE pin. */
+       writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
 
-       return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+       return ret;
 }
 
 static void cs_enable_hwecc(struct nand_chip *this, int mode)
 {
-       void __iomem *mmio_base = this->legacy.IO_ADDR_R;
+       struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
 
-       writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+       writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
 }
 
 static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
                            u_char *ecc_code)
 {
+       struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
        uint32_t ecc;
-       void __iomem *mmio_base = this->legacy.IO_ADDR_R;
 
-       ecc = readl(mmio_base + MM_NAND_STS);
+       ecc = readl(cs553x->mmio + MM_NAND_STS);
 
        ecc_code[1] = ecc >> 8;
        ecc_code[0] = ecc >> 16;
@@ -166,10 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
        return 0;
 }
 
-static struct mtd_info *cs553x_mtd[4];
+static struct cs553x_nand_controller *controllers[4];
+
+static const struct nand_controller_ops cs553x_nand_controller_ops = {
+       .exec_op = cs553x_exec_op,
+};
 
 static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
 {
+       struct cs553x_nand_controller *controller;
        int err = 0;
        struct nand_chip *this;
        struct mtd_info *new_mtd;
@@ -183,33 +263,29 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
        }
 
        /* Allocate memory for MTD device structure and private data */
-       this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-       if (!this) {
+       controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+       if (!controller) {
                err = -ENOMEM;
                goto out;
        }
 
+       this = &controller->chip;
+       nand_controller_init(&controller->base);
+       controller->base.ops = &cs553x_nand_controller_ops;
+       this->controller = &controller->base;
        new_mtd = nand_to_mtd(this);
 
        /* Link the private data with the MTD structure */
        new_mtd->owner = THIS_MODULE;
 
        /* map physical address */
-       this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096);
-       if (!this->legacy.IO_ADDR_R) {
+       controller->mmio = ioremap(adr, 4096);
+       if (!controller->mmio) {
                pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
                err = -EIO;
                goto out_mtd;
        }
 
-       this->legacy.cmd_ctrl = cs553x_hwcontrol;
-       this->legacy.dev_ready = cs553x_device_ready;
-       this->legacy.read_byte = cs553x_read_byte;
-       this->legacy.read_buf = cs553x_read_buf;
-       this->legacy.write_buf = cs553x_write_buf;
-
-       this->legacy.chip_delay = 0;
-
        this->ecc.mode = NAND_ECC_HW;
        this->ecc.size = 256;
        this->ecc.bytes = 3;
@@ -232,15 +308,15 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
        if (err)
                goto out_free;
 
-       cs553x_mtd[cs] = new_mtd;
+       controllers[cs] = controller;
        goto out;
 
 out_free:
        kfree(new_mtd->name);
 out_ior:
-       iounmap(this->legacy.IO_ADDR_R);
+       iounmap(controller->mmio);
 out_mtd:
-       kfree(this);
+       kfree(controller);
 out:
        return err;
 }
@@ -295,9 +371,10 @@ static int __init cs553x_init(void)
        /* Register all devices together here. This means we can easily hack it to
           do mtdconcat etc. if we want to. */
        for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-               if (cs553x_mtd[i]) {
+               if (controllers[i]) {
                        /* If any devices registered, return success. Else the last error. */
-                       mtd_device_register(cs553x_mtd[i], NULL, 0);
+                       mtd_device_register(nand_to_mtd(&controllers[i]->chip),
+                                           NULL, 0);
                        err = 0;
                }
        }
@@ -312,26 +389,26 @@ static void __exit cs553x_cleanup(void)
        int i;
 
        for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-               struct mtd_info *mtd = cs553x_mtd[i];
-               struct nand_chip *this;
-               void __iomem *mmio_base;
+               struct cs553x_nand_controller *controller = controllers[i];
+               struct nand_chip *this = &controller->chip;
+               struct mtd_info *mtd = nand_to_mtd(this);
+               int ret;
 
                if (!mtd)
                        continue;
 
-               this = mtd_to_nand(mtd);
-               mmio_base = this->legacy.IO_ADDR_R;
-
                /* Release resources, unregister device */
-               nand_release(this);
+               ret = mtd_device_unregister(mtd);
+               WARN_ON(ret);
+               nand_cleanup(this);
                kfree(mtd->name);
-               cs553x_mtd[i] = NULL;
+               controllers[i] = NULL;
 
                /* unmap physical address */
-               iounmap(mmio_base);
+               iounmap(controller->mmio);
 
                /* Free the MTD device structure */
-               kfree(this);
+               kfree(controller);
        }
 }
 
index 25c185b..d975a62 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/slab.h>
@@ -38,6 +38,7 @@
  * outputs in a "wire-AND" configuration, with no per-chip signals.
  */
 struct davinci_nand_info {
+       struct nand_controller  controller;
        struct nand_chip        chip;
 
        struct platform_device  *pdev;
@@ -80,46 +81,6 @@ static inline void davinci_nand_writel(struct davinci_nand_info *info,
 
 /*----------------------------------------------------------------------*/
 
-/*
- * Access to hardware control lines:  ALE, CLE, secondary chipselect.
- */
-
-static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd,
-                                  unsigned int ctrl)
-{
-       struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
-       void __iomem                    *addr = info->current_cs;
-
-       /* Did the control lines change? */
-       if (ctrl & NAND_CTRL_CHANGE) {
-               if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
-                       addr += info->mask_cle;
-               else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
-                       addr += info->mask_ale;
-
-               nand->legacy.IO_ADDR_W = addr;
-       }
-
-       if (cmd != NAND_CMD_NONE)
-               iowrite8(cmd, nand->legacy.IO_ADDR_W);
-}
-
-static void nand_davinci_select_chip(struct nand_chip *nand, int chip)
-{
-       struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
-
-       info->current_cs = info->vaddr;
-
-       /* maybe kick in a second chipselect */
-       if (chip > 0)
-               info->current_cs += info->mask_chipsel;
-
-       info->chip.legacy.IO_ADDR_W = info->current_cs;
-       info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W;
-}
-
-/*----------------------------------------------------------------------*/
-
 /*
  * 1-bit hardware ECC ... context maintained for each core chipselect
  */
@@ -410,48 +371,75 @@ correct:
        return corrected;
 }
 
-/*----------------------------------------------------------------------*/
-
-/*
- * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
- * how these chips are normally wired.  This translates to both 8 and 16
- * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+/**
+ * nand_read_page_hwecc_oob_first - hw ecc, read oob first
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
  *
- * For now we assume that configuration, or any other one which ignores
- * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
- * and have that transparently morphed into multiple NAND operations.
+ * Hardware ECC for large page chips, require OOB to be read first. For this
+ * ECC mode, the write_page method is re-used from ECC_HW. These methods
+ * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
+ * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
+ * the data area, by overwriting the NAND manufacturer bad block markings.
  */
-static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf,
-                                 int len)
+static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
+                                                 uint8_t *buf,
+                                                 int oob_required, int page)
 {
-       if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
-               ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
-       else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
-               ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
-       else
-               ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       int i, eccsize = chip->ecc.size, ret;
+       int eccbytes = chip->ecc.bytes;
+       int eccsteps = chip->ecc.steps;
+       uint8_t *p = buf;
+       uint8_t *ecc_code = chip->ecc.code_buf;
+       uint8_t *ecc_calc = chip->ecc.calc_buf;
+       unsigned int max_bitflips = 0;
+
+       /* Read the OOB area first */
+       ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+       if (ret)
+               return ret;
 
-static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf,
-                                  int len)
-{
-       if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
-               iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
-       else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
-               iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
-       else
-               iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+       ret = nand_read_page_op(chip, page, 0, NULL, 0);
+       if (ret)
+               return ret;
 
-/*
- * Check hardware register for wait status. Returns 1 if device is ready,
- * 0 if it is still busy.
- */
-static int nand_davinci_dev_ready(struct nand_chip *chip)
-{
-       struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+       ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+                                        chip->ecc.total);
+       if (ret)
+               return ret;
+
+       for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+               int stat;
+
+               chip->ecc.hwctl(chip, NAND_ECC_READ);
 
-       return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+               ret = nand_read_data_op(chip, p, eccsize, false, false);
+               if (ret)
+                       return ret;
+
+               chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+               stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+               if (stat == -EBADMSG &&
+                   (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+                       /* check for empty pages with bitflips */
+                       stat = nand_check_erased_ecc_chunk(p, eccsize,
+                                                          &ecc_code[i],
+                                                          eccbytes, NULL, 0,
+                                                          chip->ecc.strength);
+               }
+
+               if (stat < 0) {
+                       mtd->ecc_stats.failed++;
+               } else {
+                       mtd->ecc_stats.corrected += stat;
+                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
+               }
+       }
+       return max_bitflips;
 }
 
 /*----------------------------------------------------------------------*/
@@ -613,6 +601,13 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
                break;
        case NAND_ECC_HW:
                if (pdata->ecc_bits == 4) {
+                       int chunks = mtd->writesize / 512;
+
+                       if (!chunks || mtd->oobsize < 16) {
+                               dev_dbg(&info->pdev->dev, "too small\n");
+                               return -EINVAL;
+                       }
+
                        /*
                         * No sanity checks:  CPUs must support this,
                         * and the chips may not use NAND_BUSWIDTH_16.
@@ -635,6 +630,26 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
                        info->chip.ecc.bytes = 10;
                        info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
                        info->chip.ecc.algo = NAND_ECC_BCH;
+
+                       /*
+                        * Update ECC layout if needed ... for 1-bit HW ECC, the
+                        * default is OK, but it allocates 6 bytes when only 3
+                        * are needed (for each 512 bytes). For 4-bit HW ECC,
+                        * the default is not usable: 10 bytes needed, not 6.
+                        *
+                        * For small page chips, preserve the manufacturer's
+                        * badblock marking data ... and make sure a flash BBT
+                        * table marker fits in the free bytes.
+                        */
+                       if (chunks == 1) {
+                               mtd_set_ooblayout(mtd,
+                                                 &hwecc4_small_ooblayout_ops);
+                       } else if (chunks == 4 || chunks == 8) {
+                               mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+                               info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+                       } else {
+                               return -EIO;
+                       }
                } else {
                        /* 1bit ecc hamming */
                        info->chip.ecc.calculate = nand_davinci_calculate_1bit;
@@ -650,39 +665,111 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
                return -EINVAL;
        }
 
-       /*
-        * Update ECC layout if needed ... for 1-bit HW ECC, the default
-        * is OK, but it allocates 6 bytes when only 3 are needed (for
-        * each 512 bytes).  For the 4-bit HW ECC, that default is not
-        * usable:  10 bytes are needed, not 6.
-        */
-       if (pdata->ecc_bits == 4) {
-               int chunks = mtd->writesize / 512;
+       return ret;
+}
 
-               if (!chunks || mtd->oobsize < 16) {
-                       dev_dbg(&info->pdev->dev, "too small\n");
-                       return -EINVAL;
-               }
+static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
+                                unsigned int len, bool force_8bit)
+{
+       u32 alignment = ((uintptr_t)buf | len) & 3;
 
-               /* For small page chips, preserve the manufacturer's
-                * badblock marking data ... and make sure a flash BBT
-                * table marker fits in the free bytes.
-                */
-               if (chunks == 1) {
-                       mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
-               } else if (chunks == 4 || chunks == 8) {
-                       mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-                       info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
-               } else {
-                       return -EIO;
+       if (force_8bit || (alignment & 1))
+               ioread8_rep(info->current_cs, buf, len);
+       else if (alignment & 3)
+               ioread16_rep(info->current_cs, buf, len >> 1);
+       else
+               ioread32_rep(info->current_cs, buf, len >> 2);
+}
+
+static void nand_davinci_data_out(struct davinci_nand_info *info,
+                                 const void *buf, unsigned int len,
+                                 bool force_8bit)
+{
+       u32 alignment = ((uintptr_t)buf | len) & 3;
+
+       if (force_8bit || (alignment & 1))
+               iowrite8_rep(info->current_cs, buf, len);
+       else if (alignment & 3)
+               iowrite16_rep(info->current_cs, buf, len >> 1);
+       else
+               iowrite32_rep(info->current_cs, buf, len >> 2);
+}
+
+static int davinci_nand_exec_instr(struct davinci_nand_info *info,
+                                  const struct nand_op_instr *instr)
+{
+       unsigned int i, timeout_us;
+       u32 status;
+       int ret;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               iowrite8(instr->ctx.cmd.opcode,
+                        info->current_cs + info->mask_cle);
+               break;
+
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       iowrite8(instr->ctx.addr.addrs[i],
+                                info->current_cs + info->mask_ale);
                }
+               break;
+
+       case NAND_OP_DATA_IN_INSTR:
+               nand_davinci_data_in(info, instr->ctx.data.buf.in,
+                                    instr->ctx.data.len,
+                                    instr->ctx.data.force_8bit);
+               break;
+
+       case NAND_OP_DATA_OUT_INSTR:
+               nand_davinci_data_out(info, instr->ctx.data.buf.out,
+                                     instr->ctx.data.len,
+                                     instr->ctx.data.force_8bit);
+               break;
+
+       case NAND_OP_WAITRDY_INSTR:
+               timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+               ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
+                                                status, status & BIT(0), 100,
+                                                timeout_us);
+               if (ret)
+                       return ret;
+
+               break;
        }
 
-       return ret;
+       if (instr->delay_ns)
+               ndelay(instr->delay_ns);
+
+       return 0;
+}
+
+static int davinci_nand_exec_op(struct nand_chip *chip,
+                               const struct nand_operation *op,
+                               bool check_only)
+{
+       struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+       unsigned int i;
+
+       if (check_only)
+               return 0;
+
+       info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
+
+       for (i = 0; i < op->ninstrs; i++) {
+               int ret;
+
+               ret = davinci_nand_exec_instr(info, &op->instrs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static const struct nand_controller_ops davinci_nand_controller_ops = {
        .attach_chip = davinci_nand_attach_chip,
+       .exec_op = davinci_nand_exec_op,
 };
 
 static int nand_davinci_probe(struct platform_device *pdev)
@@ -746,11 +833,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
        mtd->dev.parent         = &pdev->dev;
        nand_set_flash_node(&info->chip, pdev->dev.of_node);
 
-       info->chip.legacy.IO_ADDR_R     = vaddr;
-       info->chip.legacy.IO_ADDR_W     = vaddr;
-       info->chip.legacy.chip_delay    = 0;
-       info->chip.legacy.select_chip   = nand_davinci_select_chip;
-
        /* options such as NAND_BBT_USE_FLASH */
        info->chip.bbt_options  = pdata->bbt_options;
        /* options such as 16-bit widths */
@@ -767,14 +849,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
        info->mask_ale          = pdata->mask_ale ? : MASK_ALE;
        info->mask_cle          = pdata->mask_cle ? : MASK_CLE;
 
-       /* Set address of hardware control function */
-       info->chip.legacy.cmd_ctrl      = nand_davinci_hwcontrol;
-       info->chip.legacy.dev_ready     = nand_davinci_dev_ready;
-
-       /* Speed up buffer I/O */
-       info->chip.legacy.read_buf     = nand_davinci_read_buf;
-       info->chip.legacy.write_buf    = nand_davinci_write_buf;
-
        /* Use board-specific ECC config */
        info->chip.ecc.mode     = pdata->ecc_mode;
 
@@ -788,7 +862,9 @@ static int nand_davinci_probe(struct platform_device *pdev)
        spin_unlock_irq(&davinci_nand_lock);
 
        /* Scan to find existence of the device(s) */
-       info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops;
+       nand_controller_init(&info->controller);
+       info->controller.ops = &davinci_nand_controller_ops;
+       info->chip.controller = &info->controller;
        ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
@@ -817,13 +893,17 @@ err_cleanup_nand:
 static int nand_davinci_remove(struct platform_device *pdev)
 {
        struct davinci_nand_info *info = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &info->chip;
+       int ret;
 
        spin_lock_irq(&davinci_nand_lock);
        if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
 
-       nand_release(&info->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        return 0;
 }
index 6a6c919..4e6e157 100644 (file)
@@ -764,6 +764,7 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf,
 static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
                                       const struct nand_data_interface *conf)
 {
+       static const unsigned int data_setup_on_host = 10000;
        struct denali_controller *denali = to_denali_controller(chip);
        struct denali_chip_sel *sel;
        const struct nand_sdr_timings *timings;
@@ -796,15 +797,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
 
        sel = &to_denali_chip(chip)->sels[chipnr];
 
-       /* tREA -> ACC_CLKS */
-       acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
-       acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
-
-       tmp = ioread32(denali->reg + ACC_CLKS);
-       tmp &= ~ACC_CLKS__VALUE;
-       tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
-       sel->acc_clks = tmp;
-
        /* tRWH -> RE_2_WE */
        re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
        re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
@@ -862,14 +854,45 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
        tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
        sel->rdwr_en_hi_cnt = tmp;
 
-       /* tRP, tWP -> RDWR_EN_LO_CNT */
+       /*
+        * tREA -> ACC_CLKS
+        * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
+        */
+
+       /*
+        * Determine the minimum of acc_clks to meet the setup timing when
+        * capturing the incoming data.
+        *
+        * The delay on the chip side is well-defined as tREA, but we need to
+        * take additional delay into account. This includes a certain degree
+        * of uncertainty, such as signal propagation delays on the PCB and
+        * in the SoC, load capacity of the I/O pins, etc.
+        */
+       acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
+
+       /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
        rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+
+       /* Extend rdwr_en_lo to meet the data hold timing */
+       rdwr_en_lo = max_t(int, rdwr_en_lo,
+                          acc_clks - timings->tRHOH_min / t_x);
+
+       /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
        rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
                                     t_x);
-       rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
        rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
        rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
 
+       /* Center the data latch timing for extra safety */
+       acc_clks = (acc_clks + rdwr_en_lo +
+                   DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
+       acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+       tmp = ioread32(denali->reg + ACC_CLKS);
+       tmp &= ~ACC_CLKS__VALUE;
+       tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+       sel->acc_clks = tmp;
+
        tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
        tmp &= ~RDWR_EN_LO_CNT__VALUE;
        tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
@@ -1203,7 +1226,7 @@ int denali_chip_init(struct denali_controller *denali,
                mtd->name = "denali-nand";
 
        if (denali->dma_avail) {
-               chip->options |= NAND_USE_BOUNCE_BUFFER;
+               chip->options |= NAND_USES_DMA;
                chip->buf_align = 16;
        }
 
@@ -1336,10 +1359,17 @@ EXPORT_SYMBOL(denali_init);
 
 void denali_remove(struct denali_controller *denali)
 {
-       struct denali_chip *dchip;
+       struct denali_chip *dchip, *tmp;
+       struct nand_chip *chip;
+       int ret;
 
-       list_for_each_entry(dchip, &denali->chips, node)
-               nand_release(&dchip->chip);
+       list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
+               chip = &dchip->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               list_del(&dchip->node);
+       }
 
        denali_disable_irq(denali);
 }
index c2a391a..4372186 100644 (file)
@@ -58,6 +58,7 @@ static unsigned long doc_locations[] __initdata = {
 static struct mtd_info *doclist = NULL;
 
 struct doc_priv {
+       struct nand_controller base;
        void __iomem *virtadr;
        unsigned long physadr;
        u_char ChipID;
@@ -69,6 +70,7 @@ struct doc_priv {
        int mh1_page;
        struct rs_control *rs_decoder;
        struct mtd_info *nextdoc;
+       bool supports_32b_reads;
 
        /* Handle the last stage of initialization (BBT scan, partitioning) */
        int (*late_init)(struct mtd_info *mtd);
@@ -84,10 +86,6 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
 #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
 #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
 
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
-                             unsigned int bitmask);
-static void doc200x_select_chip(struct nand_chip *this, int chip);
-
 static int debug = 0;
 module_param(debug, int, 0);
 
@@ -302,20 +300,6 @@ static void doc2000_write_byte(struct nand_chip *this, u_char datum)
        WriteDOC(datum, docptr, 2k_CDSN_IO);
 }
 
-static u_char doc2000_read_byte(struct nand_chip *this)
-{
-       struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-       u_char ret;
-
-       ReadDOC(docptr, CDSNSlowIO);
-       DoC_Delay(doc, 2);
-       ret = ReadDOC(docptr, 2k_CDSN_IO);
-       if (debug)
-               printk("read_byte returns %02x\n", ret);
-       return ret;
-}
-
 static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
                             int len)
 {
@@ -337,33 +321,42 @@ static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
        void __iomem *docptr = doc->virtadr;
+       u32 *buf32 = (u32 *)buf;
        int i;
 
        if (debug)
                printk("readbuf of %d bytes: ", len);
 
-       for (i = 0; i < len; i++)
-               buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+       if (!doc->supports_32b_reads ||
+           ((((unsigned long)buf) | len) & 3)) {
+               for (i = 0; i < len; i++)
+                       buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+       } else {
+               for (i = 0; i < len / 4; i++)
+                       buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
+       }
 }
 
-static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len)
+/*
+ * We need our own readid() here because it's called before the NAND chip
+ * has been initialized, and calling nand_op_readid() would lead to a NULL
+ * pointer exception when dereferencing the NAND timings.
+ */
+static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
 {
-       struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-       int i;
+       u8 addr = 0;
+       struct nand_op_instr instrs[] = {
+               NAND_OP_CMD(NAND_CMD_READID, 0),
+               NAND_OP_ADDR(1, &addr, 50),
+               NAND_OP_8BIT_DATA_IN(2, id, 0),
+       };
 
-       if (debug)
-               printk("readbuf_dword of %d bytes: ", len);
+       struct nand_operation op = NAND_OPERATION(cs, instrs);
 
-       if (unlikely((((unsigned long)buf) | len) & 3)) {
-               for (i = 0; i < len; i++) {
-                       *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
-               }
-       } else {
-               for (i = 0; i < len; i += 4) {
-                       *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
-               }
-       }
+       if (!id)
+               op.ninstrs--;
+
+       this->controller->ops->exec_op(this, &op, false);
 }
 
 static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
@@ -371,20 +364,11 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
        struct nand_chip *this = mtd_to_nand(mtd);
        struct doc_priv *doc = nand_get_controller_data(this);
        uint16_t ret;
+       u8 id[2];
 
-       doc200x_select_chip(this, nr);
-       doc200x_hwcontrol(this, NAND_CMD_READID,
-                         NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-       doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-       doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+       doc200x_readid(this, nr, id);
 
-       /* We can't use dev_ready here, but at least we wait for the
-        * command to complete
-        */
-       udelay(50);
-
-       ret = this->legacy.read_byte(this) << 8;
-       ret |= this->legacy.read_byte(this);
+       ret = ((u16)id[0] << 8) | id[1];
 
        if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
                /* First chip probe. See if we get same results by 32-bit access */
@@ -394,18 +378,12 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
                } ident;
                void __iomem *docptr = doc->virtadr;
 
-               doc200x_hwcontrol(this, NAND_CMD_READID,
-                                 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-               doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-               doc200x_hwcontrol(this, NAND_CMD_NONE,
-                                 NAND_NCE | NAND_CTRL_CHANGE);
-
-               udelay(50);
+               doc200x_readid(this, nr, NULL);
 
                ident.dword = readl(docptr + DoC_2k_CDSN_IO);
                if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
                        pr_info("DiskOnChip 2000 responds to DWORD access\n");
-                       this->legacy.read_buf = &doc2000_readbuf_dword;
+                       doc->supports_32b_reads = true;
                }
        }
 
@@ -434,20 +412,6 @@ static void __init doc2000_count_chips(struct mtd_info *mtd)
        pr_debug("Detected %d chips per floor.\n", i);
 }
 
-static int doc200x_wait(struct nand_chip *this)
-{
-       struct doc_priv *doc = nand_get_controller_data(this);
-
-       int status;
-
-       DoC_WaitReady(doc);
-       nand_status_op(this, NULL);
-       DoC_WaitReady(doc);
-       status = (int)this->legacy.read_byte(this);
-
-       return status;
-}
-
 static void doc2001_write_byte(struct nand_chip *this, u_char datum)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
@@ -458,19 +422,6 @@ static void doc2001_write_byte(struct nand_chip *this, u_char datum)
        WriteDOC(datum, docptr, WritePipeTerm);
 }
 
-static u_char doc2001_read_byte(struct nand_chip *this)
-{
-       struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-
-       //ReadDOC(docptr, CDSNSlowIO);
-       /* 11.4.5 -- delay twice to allow extended length cycle */
-       DoC_Delay(doc, 2);
-       ReadDOC(docptr, ReadPipeInit);
-       //return ReadDOC(docptr, Mil_CDSN_IO);
-       return ReadDOC(docptr, LastDataRead);
-}
-
 static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
@@ -499,20 +450,6 @@ static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
        buf[i] = ReadDOC(docptr, LastDataRead);
 }
 
-static u_char doc2001plus_read_byte(struct nand_chip *this)
-{
-       struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-       u_char ret;
-
-       ReadDOC(docptr, Mplus_ReadPipeInit);
-       ReadDOC(docptr, Mplus_ReadPipeInit);
-       ret = ReadDOC(docptr, Mplus_LastDataRead);
-       if (debug)
-               printk("read_byte returns %02x\n", ret);
-       return ret;
-}
-
 static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
@@ -550,9 +487,12 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
        }
 
        /* Terminate read pipeline */
-       buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
-       if (debug && i < 16)
-               printk("%02x ", buf[len - 2]);
+       if (len >= 2) {
+               buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+               if (debug && i < 16)
+                       printk("%02x ", buf[len - 2]);
+       }
+
        buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
        if (debug && i < 16)
                printk("%02x ", buf[len - 1]);
@@ -560,226 +500,163 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
                printk("\n");
 }
 
-static void doc2001plus_select_chip(struct nand_chip *this, int chip)
+static void doc200x_write_control(struct doc_priv *doc, u8 value)
+{
+       WriteDOC(value, doc->virtadr, CDSNControl);
+       /* 11.4.3 -- 4 NOPs after CSDNControl write */
+       DoC_Delay(doc, 4);
+}
+
+static void doc200x_exec_instr(struct nand_chip *this,
+                              const struct nand_op_instr *instr)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-       int floor = 0;
+       unsigned int i;
 
-       if (debug)
-               printk("select chip (%d)\n", chip);
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
+               doc2000_write_byte(this, instr->ctx.cmd.opcode);
+               break;
 
-       if (chip == -1) {
-               /* Disable flash internally */
-               WriteDOC(0, docptr, Mplus_FlashSelect);
-               return;
-       }
+       case NAND_OP_ADDR_INSTR:
+               doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       u8 addr = instr->ctx.addr.addrs[i];
 
-       floor = chip / doc->chips_per_floor;
-       chip -= (floor * doc->chips_per_floor);
+                       if (DoC_is_2000(doc))
+                               doc2000_write_byte(this, addr);
+                       else
+                               doc2001_write_byte(this, addr);
+               }
+               break;
 
-       /* Assert ChipEnable and deassert WriteProtect */
-       WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
-       nand_reset_op(this);
+       case NAND_OP_DATA_IN_INSTR:
+               doc200x_write_control(doc, CDSN_CTRL_CE);
+               if (DoC_is_2000(doc))
+                       doc2000_readbuf(this, instr->ctx.data.buf.in,
+                                       instr->ctx.data.len);
+               else
+                       doc2001_readbuf(this, instr->ctx.data.buf.in,
+                                       instr->ctx.data.len);
+               break;
 
-       doc->curchip = chip;
-       doc->curfloor = floor;
+       case NAND_OP_DATA_OUT_INSTR:
+               doc200x_write_control(doc, CDSN_CTRL_CE);
+               if (DoC_is_2000(doc))
+                       doc2000_writebuf(this, instr->ctx.data.buf.out,
+                                        instr->ctx.data.len);
+               else
+                       doc2001_writebuf(this, instr->ctx.data.buf.out,
+                                        instr->ctx.data.len);
+               break;
+
+       case NAND_OP_WAITRDY_INSTR:
+               DoC_WaitReady(doc);
+               break;
+       }
+
+       if (instr->delay_ns)
+               ndelay(instr->delay_ns);
 }
 
-static void doc200x_select_chip(struct nand_chip *this, int chip)
+static int doc200x_exec_op(struct nand_chip *this,
+                          const struct nand_operation *op,
+                          bool check_only)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-       int floor = 0;
+       unsigned int i;
 
-       if (debug)
-               printk("select chip (%d)\n", chip);
+       if (check_only)
+               return true;
 
-       if (chip == -1)
-               return;
+       doc->curchip = op->cs % doc->chips_per_floor;
+       doc->curfloor = op->cs / doc->chips_per_floor;
 
-       floor = chip / doc->chips_per_floor;
-       chip -= (floor * doc->chips_per_floor);
+       WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
+       WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
 
-       /* 11.4.4 -- deassert CE before changing chip */
-       doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+       /* Assert CE pin */
+       doc200x_write_control(doc, CDSN_CTRL_CE);
 
-       WriteDOC(floor, docptr, FloorSelect);
-       WriteDOC(chip, docptr, CDSNDeviceSelect);
+       for (i = 0; i < op->ninstrs; i++)
+               doc200x_exec_instr(this, &op->instrs[i]);
 
-       doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+       /* De-assert CE pin */
+       doc200x_write_control(doc, 0);
 
-       doc->curchip = chip;
-       doc->curfloor = floor;
+       return 0;
 }
 
-#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
-
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
-                             unsigned int ctrl)
+static void doc2001plus_write_pipe_term(struct doc_priv *doc)
 {
-       struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
-
-       if (ctrl & NAND_CTRL_CHANGE) {
-               doc->CDSNControl &= ~CDSN_CTRL_MSK;
-               doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
-               if (debug)
-                       printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
-               WriteDOC(doc->CDSNControl, docptr, CDSNControl);
-               /* 11.4.3 -- 4 NOPs after CSDNControl write */
-               DoC_Delay(doc, 4);
-       }
-       if (cmd != NAND_CMD_NONE) {
-               if (DoC_is_2000(doc))
-                       doc2000_write_byte(this, cmd);
-               else
-                       doc2001_write_byte(this, cmd);
-       }
+       WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+       WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
 }
 
-static void doc2001plus_command(struct nand_chip *this, unsigned command,
-                               int column, int page_addr)
+static void doc2001plus_exec_instr(struct nand_chip *this,
+                                  const struct nand_op_instr *instr)
 {
-       struct mtd_info *mtd = nand_to_mtd(this);
        struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
+       unsigned int i;
 
-       /*
-        * Must terminate write pipeline before sending any commands
-        * to the device.
-        */
-       if (command == NAND_CMD_PAGEPROG) {
-               WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-               WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-       }
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
+               doc2001plus_write_pipe_term(doc);
+               break;
 
-       /*
-        * Write out the command to the device.
-        */
-       if (command == NAND_CMD_SEQIN) {
-               int readcmd;
-
-               if (column >= mtd->writesize) {
-                       /* OOB area */
-                       column -= mtd->writesize;
-                       readcmd = NAND_CMD_READOOB;
-               } else if (column < 256) {
-                       /* First 256 bytes --> READ0 */
-                       readcmd = NAND_CMD_READ0;
-               } else {
-                       column -= 256;
-                       readcmd = NAND_CMD_READ1;
-               }
-               WriteDOC(readcmd, docptr, Mplus_FlashCmd);
-       }
-       WriteDOC(command, docptr, Mplus_FlashCmd);
-       WriteDOC(0, docptr, Mplus_WritePipeTerm);
-       WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
-       if (column != -1 || page_addr != -1) {
-               /* Serially input address */
-               if (column != -1) {
-                       /* Adjust columns for 16 bit buswidth */
-                       if (this->options & NAND_BUSWIDTH_16 &&
-                                       !nand_opcode_8bits(command))
-                               column >>= 1;
-                       WriteDOC(column, docptr, Mplus_FlashAddress);
-               }
-               if (page_addr != -1) {
-                       WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
-                       WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
-                       if (this->options & NAND_ROW_ADDR_3) {
-                               WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
-                               printk("high density\n");
-                       }
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       u8 addr = instr->ctx.addr.addrs[i];
+
+                       WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
                }
-               WriteDOC(0, docptr, Mplus_WritePipeTerm);
-               WriteDOC(0, docptr, Mplus_WritePipeTerm);
+               doc2001plus_write_pipe_term(doc);
                /* deassert ALE */
-               if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
-                   command == NAND_CMD_READOOB || command == NAND_CMD_READID)
-                       WriteDOC(0, docptr, Mplus_FlashControl);
-       }
-
-       /*
-        * program and erase have their own busy handlers
-        * status and sequential in needs no delay
-        */
-       switch (command) {
-
-       case NAND_CMD_PAGEPROG:
-       case NAND_CMD_ERASE1:
-       case NAND_CMD_ERASE2:
-       case NAND_CMD_SEQIN:
-       case NAND_CMD_STATUS:
-               return;
+               WriteDOC(0, doc->virtadr, Mplus_FlashControl);
+               break;
 
-       case NAND_CMD_RESET:
-               if (this->legacy.dev_ready)
-                       break;
-               udelay(this->legacy.chip_delay);
-               WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
-               WriteDOC(0, docptr, Mplus_WritePipeTerm);
-               WriteDOC(0, docptr, Mplus_WritePipeTerm);
-               while (!(this->legacy.read_byte(this) & 0x40)) ;
-               return;
-
-               /* This applies to read commands */
-       default:
-               /*
-                * If we don't have access to the busy pin, we apply the given
-                * command delay
-                */
-               if (!this->legacy.dev_ready) {
-                       udelay(this->legacy.chip_delay);
-                       return;
-               }
+       case NAND_OP_DATA_IN_INSTR:
+               doc2001plus_readbuf(this, instr->ctx.data.buf.in,
+                                   instr->ctx.data.len);
+               break;
+       case NAND_OP_DATA_OUT_INSTR:
+               doc2001plus_writebuf(this, instr->ctx.data.buf.out,
+                                    instr->ctx.data.len);
+               doc2001plus_write_pipe_term(doc);
+               break;
+       case NAND_OP_WAITRDY_INSTR:
+               DoC_WaitReady(doc);
+               break;
        }
 
-       /* Apply this short delay always to ensure that we do wait tWB in
-        * any case on any machine. */
-       ndelay(100);
-       /* wait until command is processed */
-       while (!this->legacy.dev_ready(this)) ;
+       if (instr->delay_ns)
+               ndelay(instr->delay_ns);
 }
 
-static int doc200x_dev_ready(struct nand_chip *this)
+static int doc2001plus_exec_op(struct nand_chip *this,
+                              const struct nand_operation *op,
+                              bool check_only)
 {
        struct doc_priv *doc = nand_get_controller_data(this);
-       void __iomem *docptr = doc->virtadr;
+       unsigned int i;
 
-       if (DoC_is_MillenniumPlus(doc)) {
-               /* 11.4.2 -- must NOP four times before checking FR/B# */
-               DoC_Delay(doc, 4);
-               if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
-                       if (debug)
-                               printk("not ready\n");
-                       return 0;
-               }
-               if (debug)
-                       printk("was ready\n");
-               return 1;
-       } else {
-               /* 11.4.2 -- must NOP four times before checking FR/B# */
-               DoC_Delay(doc, 4);
-               if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
-                       if (debug)
-                               printk("not ready\n");
-                       return 0;
-               }
-               /* 11.4.2 -- Must NOP twice if it's ready */
-               DoC_Delay(doc, 2);
-               if (debug)
-                       printk("was ready\n");
-               return 1;
-       }
-}
+       if (check_only)
+               return true;
+
+       doc->curchip = op->cs % doc->chips_per_floor;
+       doc->curfloor = op->cs / doc->chips_per_floor;
+
+       /* Assert ChipEnable and deassert WriteProtect */
+       WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
+
+       for (i = 0; i < op->ninstrs; i++)
+               doc2001plus_exec_instr(this, &op->instrs[i]);
+
+       /* De-assert ChipEnable */
+       WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
 
-static int doc200x_block_bad(struct nand_chip *this, loff_t ofs)
-{
-       /* This is our last resort if we couldn't find or create a BBT.  Just
-          pretend all blocks are good. */
        return 0;
 }
 
@@ -1344,9 +1221,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
        struct nand_chip *this = mtd_to_nand(mtd);
        struct doc_priv *doc = nand_get_controller_data(this);
 
-       this->legacy.read_byte = doc2000_read_byte;
-       this->legacy.write_buf = doc2000_writebuf;
-       this->legacy.read_buf = doc2000_readbuf;
        doc->late_init = nftl_scan_bbt;
 
        doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1360,10 +1234,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
        struct nand_chip *this = mtd_to_nand(mtd);
        struct doc_priv *doc = nand_get_controller_data(this);
 
-       this->legacy.read_byte = doc2001_read_byte;
-       this->legacy.write_buf = doc2001_writebuf;
-       this->legacy.read_buf = doc2001_readbuf;
-
        ReadDOC(doc->virtadr, ChipID);
        ReadDOC(doc->virtadr, ChipID);
        ReadDOC(doc->virtadr, ChipID);
@@ -1390,13 +1260,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
        struct nand_chip *this = mtd_to_nand(mtd);
        struct doc_priv *doc = nand_get_controller_data(this);
 
-       this->legacy.read_byte = doc2001plus_read_byte;
-       this->legacy.write_buf = doc2001plus_writebuf;
-       this->legacy.read_buf = doc2001plus_readbuf;
        doc->late_init = inftl_scan_bbt;
-       this->legacy.cmd_ctrl = NULL;
-       this->legacy.select_chip = doc2001plus_select_chip;
-       this->legacy.cmdfunc = doc2001plus_command;
        this->ecc.hwctl = doc2001plus_enable_hwecc;
 
        doc->chips_per_floor = 1;
@@ -1405,6 +1269,14 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
        return 1;
 }
 
+static const struct nand_controller_ops doc200x_ops = {
+       .exec_op = doc200x_exec_op,
+};
+
+static const struct nand_controller_ops doc2001plus_ops = {
+       .exec_op = doc2001plus_exec_op,
+};
+
 static int __init doc_probe(unsigned long physadr)
 {
        struct nand_chip *nand = NULL;
@@ -1548,7 +1420,6 @@ static int __init doc_probe(unsigned long physadr)
                goto fail;
        }
 
-
        /*
         * Allocate a RS codec instance
         *
@@ -1566,6 +1437,12 @@ static int __init doc_probe(unsigned long physadr)
                goto fail;
        }
 
+       nand_controller_init(&doc->base);
+       if (ChipID == DOC_ChipID_DocMilPlus16)
+               doc->base.ops = &doc2001plus_ops;
+       else
+               doc->base.ops = &doc200x_ops;
+
        mtd                     = nand_to_mtd(nand);
        nand->bbt_td            = (struct nand_bbt_descr *) (doc + 1);
        nand->bbt_md            = nand->bbt_td + 1;
@@ -1573,12 +1450,8 @@ static int __init doc_probe(unsigned long physadr)
        mtd->owner              = THIS_MODULE;
        mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
 
+       nand->controller        = &doc->base;
        nand_set_controller_data(nand, doc);
-       nand->legacy.select_chip        = doc200x_select_chip;
-       nand->legacy.cmd_ctrl           = doc200x_hwcontrol;
-       nand->legacy.dev_ready  = doc200x_dev_ready;
-       nand->legacy.waitfunc   = doc200x_wait;
-       nand->legacy.block_bad  = doc200x_block_bad;
        nand->ecc.hwctl         = doc200x_enable_hwecc;
        nand->ecc.calculate     = doc200x_calculate_ecc;
        nand->ecc.correct       = doc200x_correct_data;
@@ -1590,7 +1463,7 @@ static int __init doc_probe(unsigned long physadr)
        nand->ecc.options       = NAND_ECC_GENERIC_ERASED_CHECK;
        nand->bbt_options       = NAND_BBT_USE_FLASH;
        /* Skip the automatic BBT scan so we can run it manually */
-       nand->options           |= NAND_SKIP_BBTSCAN;
+       nand->options           |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
 
        doc->physadr            = physadr;
        doc->virtadr            = virtadr;
@@ -1609,13 +1482,10 @@ static int __init doc_probe(unsigned long physadr)
                numchips = doc2001_init(mtd);
 
        if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
-               /* DBB note: i believe nand_release is necessary here, as
+               /* DBB note: i believe nand_cleanup is necessary here, as
                   buffers may have been allocated in nand_base.  Check with
                   Thomas. FIX ME! */
-               /* nand_release will call mtd_device_unregister, but we
-                  haven't yet added it.  This is handled without incident by
-                  mtd_device_unregister, as far as I can tell. */
-               nand_release(nand);
+               nand_cleanup(nand);
                goto fail;
        }
 
@@ -1644,13 +1514,16 @@ static void release_nanddoc(void)
        struct mtd_info *mtd, *nextmtd;
        struct nand_chip *nand;
        struct doc_priv *doc;
+       int ret;
 
        for (mtd = doclist; mtd; mtd = nextmtd) {
                nand = mtd_to_nand(mtd);
                doc = nand_get_controller_data(nand);
 
                nextmtd = doc->nextdoc;
-               nand_release(nand);
+               ret = mtd_device_unregister(mtd);
+               WARN_ON(ret);
+               nand_cleanup(nand);
                iounmap(doc->virtadr);
                release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
                free_rs(doc->rs_decoder);
index e1dc675..088692b 100644 (file)
@@ -956,8 +956,13 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
 {
        struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
        struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+       struct nand_chip *chip = &priv->chip;
+       int ret;
+
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
-       nand_release(&priv->chip);
        fsl_elbc_chip_remove(priv);
 
        mutex_lock(&fsl_elbc_nand_mutex);
index 2af09ed..00ae7a9 100644 (file)
@@ -1093,8 +1093,13 @@ err:
 static int fsl_ifc_nand_remove(struct platform_device *dev)
 {
        struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+       struct nand_chip *chip = &priv->chip;
+       int ret;
+
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
-       nand_release(&priv->chip);
        fsl_ifc_chip_remove(priv);
 
        mutex_lock(&fsl_ifc_nand_mutex);
index f31fae3..627deb2 100644 (file)
@@ -317,10 +317,13 @@ err1:
 static int fun_remove(struct platform_device *ofdev)
 {
        struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
-       struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-       int i;
+       struct nand_chip *chip = &fun->chip;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       int ret, i;
 
-       nand_release(&fun->chip);
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(chip);
        kfree(mtd->name);
 
        for (i = 0; i < fun->mchip_count; i++) {
index a6964fe..3909752 100644 (file)
@@ -608,6 +608,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
        unsigned int op_id;
        int i;
 
+       if (check_only)
+               return 0;
+
        pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
 
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -691,7 +694,7 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
        for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
                nand_read_page_op(chip, page, s * eccsize, NULL, 0);
                chip->ecc.hwctl(chip, NAND_ECC_READ);
-               ret = nand_read_data_op(chip, p, eccsize, false);
+               ret = nand_read_data_op(chip, p, eccsize, false, false);
                if (ret)
                        return ret;
 
@@ -809,11 +812,12 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
 
        i = 0;
        while (num_err--) {
-               change_bit(0, (unsigned long *)&err_idx[i]);
-               change_bit(1, (unsigned long *)&err_idx[i]);
+               err_idx[i] ^= 3;
 
                if (err_idx[i] < chip->ecc.size * 8) {
-                       change_bit(err_idx[i], (unsigned long *)dat);
+                       int err = err_idx[i];
+
+                       dat[err >> 3] ^= BIT(err & 7);
                        i++;
                }
        }
@@ -1132,7 +1136,12 @@ static int fsmc_nand_remove(struct platform_device *pdev)
        struct fsmc_nand_data *host = platform_get_drvdata(pdev);
 
        if (host) {
-               nand_release(&host->nand);
+               struct nand_chip *chip = &host->nand;
+               int ret;
+
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
                fsmc_nand_disable(host);
 
                if (host->mode == USE_DMA_ACCESS) {
index f6b1235..938077e 100644 (file)
@@ -190,8 +190,12 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
 static int gpio_nand_remove(struct platform_device *pdev)
 {
        struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &gpiomtd->nand_chip;
+       int ret;
 
-       nand_release(&gpiomtd->nand_chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        /* Enable write protection and disable the chip */
        if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
index 53b00c8..061a8dd 100644 (file)
@@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this)
                return ret;
 
        ret = pm_runtime_get_sync(this->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_autosuspend(this->dev);
                return ret;
+       }
 
        /*
        * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -834,158 +836,6 @@ map_fail:
        return false;
 }
 
-/**
- * gpmi_copy_bits - copy bits from one memory region to another
- * @dst: destination buffer
- * @dst_bit_off: bit offset we're starting to write at
- * @src: source buffer
- * @src_bit_off: bit offset we're starting to read from
- * @nbits: number of bits to copy
- *
- * This functions copies bits from one memory region to another, and is used by
- * the GPMI driver to copy ECC sections which are not guaranteed to be byte
- * aligned.
- *
- * src and dst should not overlap.
- *
- */
-static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
-                          size_t src_bit_off, size_t nbits)
-{
-       size_t i;
-       size_t nbytes;
-       u32 src_buffer = 0;
-       size_t bits_in_src_buffer = 0;
-
-       if (!nbits)
-               return;
-
-       /*
-        * Move src and dst pointers to the closest byte pointer and store bit
-        * offsets within a byte.
-        */
-       src += src_bit_off / 8;
-       src_bit_off %= 8;
-
-       dst += dst_bit_off / 8;
-       dst_bit_off %= 8;
-
-       /*
-        * Initialize the src_buffer value with bits available in the first
-        * byte of data so that we end up with a byte aligned src pointer.
-        */
-       if (src_bit_off) {
-               src_buffer = src[0] >> src_bit_off;
-               if (nbits >= (8 - src_bit_off)) {
-                       bits_in_src_buffer += 8 - src_bit_off;
-               } else {
-                       src_buffer &= GENMASK(nbits - 1, 0);
-                       bits_in_src_buffer += nbits;
-               }
-               nbits -= bits_in_src_buffer;
-               src++;
-       }
-
-       /* Calculate the number of bytes that can be copied from src to dst. */
-       nbytes = nbits / 8;
-
-       /* Try to align dst to a byte boundary. */
-       if (dst_bit_off) {
-               if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
-                       src_buffer |= src[0] << bits_in_src_buffer;
-                       bits_in_src_buffer += 8;
-                       src++;
-                       nbytes--;
-               }
-
-               if (bits_in_src_buffer >= (8 - dst_bit_off)) {
-                       dst[0] &= GENMASK(dst_bit_off - 1, 0);
-                       dst[0] |= src_buffer << dst_bit_off;
-                       src_buffer >>= (8 - dst_bit_off);
-                       bits_in_src_buffer -= (8 - dst_bit_off);
-                       dst_bit_off = 0;
-                       dst++;
-                       if (bits_in_src_buffer > 7) {
-                               bits_in_src_buffer -= 8;
-                               dst[0] = src_buffer;
-                               dst++;
-                               src_buffer >>= 8;
-                       }
-               }
-       }
-
-       if (!bits_in_src_buffer && !dst_bit_off) {
-               /*
-                * Both src and dst pointers are byte aligned, thus we can
-                * just use the optimized memcpy function.
-                */
-               if (nbytes)
-                       memcpy(dst, src, nbytes);
-       } else {
-               /*
-                * src buffer is not byte aligned, hence we have to copy each
-                * src byte to the src_buffer variable before extracting a byte
-                * to store in dst.
-                */
-               for (i = 0; i < nbytes; i++) {
-                       src_buffer |= src[i] << bits_in_src_buffer;
-                       dst[i] = src_buffer;
-                       src_buffer >>= 8;
-               }
-       }
-       /* Update dst and src pointers */
-       dst += nbytes;
-       src += nbytes;
-
-       /*
-        * nbits is the number of remaining bits. It should not exceed 8 as
-        * we've already copied as much bytes as possible.
-        */
-       nbits %= 8;
-
-       /*
-        * If there's no more bits to copy to the destination and src buffer
-        * was already byte aligned, then we're done.
-        */
-       if (!nbits && !bits_in_src_buffer)
-               return;
-
-       /* Copy the remaining bits to src_buffer */
-       if (nbits)
-               src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
-                             bits_in_src_buffer;
-       bits_in_src_buffer += nbits;
-
-       /*
-        * In case there were not enough bits to get a byte aligned dst buffer
-        * prepare the src_buffer variable to match the dst organization (shift
-        * src_buffer by dst_bit_off and retrieve the least significant bits
-        * from dst).
-        */
-       if (dst_bit_off)
-               src_buffer = (src_buffer << dst_bit_off) |
-                            (*dst & GENMASK(dst_bit_off - 1, 0));
-       bits_in_src_buffer += dst_bit_off;
-
-       /*
-        * Keep most significant bits from dst if we end up with an unaligned
-        * number of bits.
-        */
-       nbytes = bits_in_src_buffer / 8;
-       if (bits_in_src_buffer % 8) {
-               src_buffer |= (dst[nbytes] &
-                              GENMASK(7, bits_in_src_buffer % 8)) <<
-                             (nbytes * 8);
-               nbytes++;
-       }
-
-       /* Copy the remaining bytes to dst */
-       for (i = 0; i < nbytes; i++) {
-               dst[i] = src_buffer;
-               src_buffer >>= 8;
-       }
-}
-
 /* add our owner bbt descriptor */
 static uint8_t scan_ff_pattern[] = { 0xff };
 static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -1713,7 +1563,7 @@ static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
  * inline (interleaved with payload DATA), and do not align data chunk on
  * byte boundaries.
  * We thus need to take care moving the payload data and ECC bits stored in the
- * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ * page into the provided buffers, which is why we're using nand_extract_bits().
  *
  * See set_geometry_by_ecc_info inline comments to have a full description
  * of the layout used by the GPMI controller.
@@ -1762,9 +1612,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
        /* Extract interleaved payload data and ECC bits */
        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
                if (buf)
-                       gpmi_copy_bits(buf, step * eccsize * 8,
-                                      tmp_buf, src_bit_off,
-                                      eccsize * 8);
+                       nand_extract_bits(buf, step * eccsize, tmp_buf,
+                                         src_bit_off, eccsize * 8);
                src_bit_off += eccsize * 8;
 
                /* Align last ECC block to align a byte boundary */
@@ -1773,9 +1622,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
 
                if (oob_required)
-                       gpmi_copy_bits(oob, oob_bit_off,
-                                      tmp_buf, src_bit_off,
-                                      eccbits);
+                       nand_extract_bits(oob, oob_bit_off, tmp_buf,
+                                         src_bit_off, eccbits);
 
                src_bit_off += eccbits;
                oob_bit_off += eccbits;
@@ -1800,7 +1648,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
  * inline (interleaved with payload DATA), and do not align data chunk on
  * byte boundaries.
  * We thus need to take care moving the OOB area at the right place in the
- * final page, which is why we're using gpmi_copy_bits.
+ * final page, which is why we're using nand_extract_bits().
  *
  * See set_geometry_by_ecc_info inline comments to have a full description
  * of the layout used by the GPMI controller.
@@ -1839,8 +1687,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
        /* Interleave payload data and ECC bits */
        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
                if (buf)
-                       gpmi_copy_bits(tmp_buf, dst_bit_off,
-                                      buf, step * eccsize * 8, eccsize * 8);
+                       nand_extract_bits(tmp_buf, dst_bit_off, buf,
+                                         step * eccsize * 8, eccsize * 8);
                dst_bit_off += eccsize * 8;
 
                /* Align last ECC block to align a byte boundary */
@@ -1849,8 +1697,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
                        eccbits += 8 - ((oob_bit_off + eccbits) % 8);
 
                if (oob_required)
-                       gpmi_copy_bits(tmp_buf, dst_bit_off,
-                                      oob, oob_bit_off, eccbits);
+                       nand_extract_bits(tmp_buf, dst_bit_off, oob,
+                                         oob_bit_off, eccbits);
 
                dst_bit_off += eccbits;
                oob_bit_off += eccbits;
@@ -2408,6 +2256,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
        struct completion *completion;
        unsigned long to;
 
+       if (check_only)
+               return 0;
+
        this->ntransfers = 0;
        for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
                this->transfers[i].direction = DMA_NONE;
@@ -2658,7 +2509,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
 
        ret = __gpmi_enable_clk(this, true);
        if (ret)
-               goto exit_nfc_init;
+               goto exit_acquire_resources;
 
        pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
        pm_runtime_use_autosuspend(&pdev->dev);
@@ -2693,11 +2544,15 @@ exit_acquire_resources:
 static int gpmi_nand_remove(struct platform_device *pdev)
 {
        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &this->nand;
+       int ret;
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       nand_release(&this->nand);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        gpmi_free_dma_buffer(this);
        release_resources(this);
        return 0;
index 0b48be5..b84238e 100644 (file)
@@ -806,8 +806,12 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 static int hisi_nfc_remove(struct platform_device *pdev)
 {
        struct hinfc_host *host = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &host->chip;
+       int ret;
 
-       nand_release(&host->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        return 0;
 }
index 935c490..69423bb 100644 (file)
@@ -27,9 +27,6 @@
 
 #define DRV_NAME       "ingenic-nand"
 
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US    100
-
 struct jz_soc_info {
        unsigned long data_offset;
        unsigned long addr_offset;
@@ -49,7 +46,6 @@ struct ingenic_nfc {
        struct nand_controller controller;
        unsigned int num_banks;
        struct list_head chips;
-       int selected;
        struct ingenic_nand_cs cs[];
 };
 
@@ -102,7 +98,7 @@ static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
        return 0;
 }
 
-const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
        .ecc = qi_lb60_ooblayout_ecc,
        .free = qi_lb60_ooblayout_free,
 };
@@ -142,51 +138,6 @@ static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
        .free = jz4725b_ooblayout_free,
 };
 
-static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr)
-{
-       struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-       struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
-       struct ingenic_nand_cs *cs;
-
-       /* Ensure the currently selected chip is deasserted. */
-       if (chipnr == -1 && nfc->selected >= 0) {
-               cs = &nfc->cs[nfc->selected];
-               jz4780_nemc_assert(nfc->dev, cs->bank, false);
-       }
-
-       nfc->selected = chipnr;
-}
-
-static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
-                                 unsigned int ctrl)
-{
-       struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-       struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
-       struct ingenic_nand_cs *cs;
-
-       if (WARN_ON(nfc->selected < 0))
-               return;
-
-       cs = &nfc->cs[nfc->selected];
-
-       jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
-       if (cmd == NAND_CMD_NONE)
-               return;
-
-       if (ctrl & NAND_ALE)
-               writeb(cmd, cs->base + nfc->soc_info->addr_offset);
-       else if (ctrl & NAND_CLE)
-               writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
-}
-
-static int ingenic_nand_dev_ready(struct nand_chip *chip)
-{
-       struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-
-       return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
 static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
 {
        struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
@@ -298,8 +249,91 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
        return 0;
 }
 
+static int ingenic_nand_exec_instr(struct nand_chip *chip,
+                                  struct ingenic_nand_cs *cs,
+                                  const struct nand_op_instr *instr)
+{
+       struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+       struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+       unsigned int i;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               writeb(instr->ctx.cmd.opcode,
+                      cs->base + nfc->soc_info->cmd_offset);
+               return 0;
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++)
+                       writeb(instr->ctx.addr.addrs[i],
+                              cs->base + nfc->soc_info->addr_offset);
+               return 0;
+       case NAND_OP_DATA_IN_INSTR:
+               if (instr->ctx.data.force_8bit ||
+                   !(chip->options & NAND_BUSWIDTH_16))
+                       ioread8_rep(cs->base + nfc->soc_info->data_offset,
+                                   instr->ctx.data.buf.in,
+                                   instr->ctx.data.len);
+               else
+                       ioread16_rep(cs->base + nfc->soc_info->data_offset,
+                                    instr->ctx.data.buf.in,
+                                    instr->ctx.data.len);
+               return 0;
+       case NAND_OP_DATA_OUT_INSTR:
+               if (instr->ctx.data.force_8bit ||
+                   !(chip->options & NAND_BUSWIDTH_16))
+                       iowrite8_rep(cs->base + nfc->soc_info->data_offset,
+                                    instr->ctx.data.buf.out,
+                                    instr->ctx.data.len);
+               else
+                       iowrite16_rep(cs->base + nfc->soc_info->data_offset,
+                                     instr->ctx.data.buf.out,
+                                     instr->ctx.data.len);
+               return 0;
+       case NAND_OP_WAITRDY_INSTR:
+               if (!nand->busy_gpio)
+                       return nand_soft_waitrdy(chip,
+                                                instr->ctx.waitrdy.timeout_ms);
+
+               return nand_gpio_waitrdy(chip, nand->busy_gpio,
+                                        instr->ctx.waitrdy.timeout_ms);
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
+
+static int ingenic_nand_exec_op(struct nand_chip *chip,
+                               const struct nand_operation *op,
+                               bool check_only)
+{
+       struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+       struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+       struct ingenic_nand_cs *cs;
+       unsigned int i;
+       int ret = 0;
+
+       if (check_only)
+               return 0;
+
+       cs = &nfc->cs[op->cs];
+       jz4780_nemc_assert(nfc->dev, cs->bank, true);
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
+               if (ret)
+                       break;
+
+               if (op->instrs[i].delay_ns)
+                       ndelay(op->instrs[i].delay_ns);
+       }
+       jz4780_nemc_assert(nfc->dev, cs->bank, false);
+
+       return ret;
+}
+
 static const struct nand_controller_ops ingenic_nand_controller_ops = {
        .attach_chip = ingenic_nand_attach_chip,
+       .exec_op = ingenic_nand_exec_op,
 };
 
 static int ingenic_nand_init_chip(struct platform_device *pdev,
@@ -339,10 +373,20 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
                ret = PTR_ERR(nand->busy_gpio);
                dev_err(dev, "failed to request busy GPIO: %d\n", ret);
                return ret;
-       } else if (nand->busy_gpio) {
-               nand->chip.legacy.dev_ready = ingenic_nand_dev_ready;
        }
 
+       /*
+        * The rb-gpios semantics was undocumented and qi,lb60 (along with
+        * the ingenic driver) got it wrong. The active state encodes the
+        * NAND ready state, which is high level. Since there's no signal
+        * inverter on this board, it should be active-high. Let's fix that
+        * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
+        * helper, and be consistent with what other drivers do.
+        */
+       if (of_machine_is_compatible("qi,lb60") &&
+           gpiod_is_active_low(nand->busy_gpio))
+               gpiod_toggle_active_low(nand->busy_gpio);
+
        nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
 
        if (IS_ERR(nand->wp_gpio)) {
@@ -359,12 +403,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
                return -ENOMEM;
        mtd->dev.parent = dev;
 
-       chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
-       chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset;
-       chip->legacy.chip_delay = RB_DELAY_US;
        chip->options = NAND_NO_SUBPAGE_WRITE;
-       chip->legacy.select_chip = ingenic_nand_select_chip;
-       chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl;
        chip->ecc.mode = NAND_ECC_HW;
        chip->controller = &nfc->controller;
        nand_set_flash_node(chip, np);
@@ -376,7 +415,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
 
        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
-               nand_release(chip);
+               nand_cleanup(chip);
                return ret;
        }
 
@@ -387,13 +426,18 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
 
 static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
 {
-       struct ingenic_nand *chip;
+       struct ingenic_nand *ingenic_chip;
+       struct nand_chip *chip;
+       int ret;
 
        while (!list_empty(&nfc->chips)) {
-               chip = list_first_entry(&nfc->chips,
-                                       struct ingenic_nand, chip_list);
-               nand_release(&chip->chip);
-               list_del(&chip->chip_list);
+               ingenic_chip = list_first_entry(&nfc->chips,
+                                               struct ingenic_nand, chip_list);
+               chip = &ingenic_chip->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               list_del(&ingenic_chip->chip_list);
        }
 }
 
index 9d0caad..03866b0 100644 (file)
@@ -75,6 +75,9 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
 extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
 extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
 
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
 /* Core functions */
 const struct nand_manufacturer *nand_get_manufacturer(u8 id);
 int nand_bbm_get_next_page(struct nand_chip *chip, int page);
@@ -106,6 +109,15 @@ static inline bool nand_has_exec_op(struct nand_chip *chip)
        return true;
 }
 
+static inline int nand_check_op(struct nand_chip *chip,
+                               const struct nand_operation *op)
+{
+       if (!nand_has_exec_op(chip))
+               return 0;
+
+       return chip->controller->ops->exec_op(chip, op, true);
+}
+
 static inline int nand_exec_op(struct nand_chip *chip,
                               const struct nand_operation *op)
 {
index 241b58b..7521038 100644 (file)
@@ -826,8 +826,13 @@ free_gpio:
 static int lpc32xx_nand_remove(struct platform_device *pdev)
 {
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &host->nand_chip;
+       int ret;
+
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
-       nand_release(&host->nand_chip);
        free_irq(host->irq, host);
        if (use_dma)
                dma_release_channel(host->dma_chan);
index 163f976..b151fd0 100644 (file)
@@ -947,8 +947,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
 {
        uint32_t tmp;
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &host->nand_chip;
+       int ret;
 
-       nand_release(&host->nand_chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        dma_release_channel(host->dma_chan);
 
        /* Force CE high */
index 179f0ca..260a043 100644 (file)
@@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
         * In case the interrupt was not served in the required time frame,
         * check if the ISR was not served or if something went actually wrong.
         */
-       if (ret && !pending) {
+       if (!ret && !pending) {
                dev_err(nfc->dev, "Timeout waiting for RB signal\n");
                return -ETIMEDOUT;
        }
@@ -932,14 +932,14 @@ static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
 }
 
 /*
- * Check a chunk is correct or not according to hardware ECC engine.
+ * Check if a chunk is correct or not according to the hardware ECC engine.
  * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however
  * mtd->ecc_stats.failure is not, the function will instead return a non-zero
  * value indicating that a check on the emptyness of the subpage must be
- * performed before declaring the subpage corrupted.
+ * performed before actually declaring the subpage as "corrupted".
  */
-static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
-                                     unsigned int *max_bitflips)
+static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
+                                            unsigned int *max_bitflips)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -1053,7 +1053,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
        marvell_nfc_enable_hw_ecc(chip);
        marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
                                            page);
-       ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+       ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
        marvell_nfc_disable_hw_ecc(chip);
 
        if (!ret)
@@ -1224,12 +1224,12 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
 
                /* Read spare bytes */
                nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
-                                 spare_len, false);
+                                 spare_len, false, false);
 
                /* Read ECC bytes */
                nand_read_data_op(chip, oob + ecc_offset +
                                  (ALIGN(lt->ecc_bytes, 32) * chunk),
-                                 ecc_len, false);
+                                 ecc_len, false, false);
        }
 
        return 0;
@@ -1336,7 +1336,7 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
                /* Read the chunk and detect number of bitflips */
                marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
                                                  spare, spare_len, page);
-               ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+               ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
                if (ret)
                        failure_mask |= BIT(chunk);
 
@@ -1358,10 +1358,9 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
         */
 
        /*
-        * In case there is any subpage read error reported by ->correct(), we
-        * usually re-read only ECC bytes in raw mode and check if the whole
-        * page is empty. In this case, it is normal that the ECC check failed
-        * and we just ignore the error.
+        * In case there is any subpage read error, we usually re-read only ECC
+        * bytes in raw mode and check if the whole page is empty. In this case,
+        * it is normal that the ECC check failed and we just ignore the error.
         *
         * However, it has been empirically observed that for some layouts (e.g
         * 2k page, 8b strength per 512B chunk), the controller tries to correct
@@ -2107,7 +2106,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip,
 {
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
 
-       marvell_nfc_select_target(chip, op->cs);
+       if (!check_only)
+               marvell_nfc_select_target(chip, op->cs);
 
        if (nfc->caps->is_nfcv2)
                return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
@@ -2166,8 +2166,8 @@ static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
        .free = marvell_nand_ooblayout_free,
 };
 
-static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
-                                        struct nand_ecc_ctrl *ecc)
+static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
+                                              struct nand_ecc_ctrl *ecc)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -2261,7 +2261,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
 
        switch (ecc->mode) {
        case NAND_ECC_HW:
-               ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+               ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
                if (ret)
                        return ret;
                break;
@@ -2664,7 +2664,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
                ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "failed to register mtd device: %d\n", ret);
-               nand_release(chip);
+               nand_cleanup(chip);
                return ret;
        }
 
@@ -2673,6 +2673,21 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
        return 0;
 }
 
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+       struct marvell_nand_chip *entry, *temp;
+       struct nand_chip *chip;
+       int ret;
+
+       list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+               chip = &entry->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               list_del(&entry->node);
+       }
+}
+
 static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
 {
        struct device_node *np = dev->of_node;
@@ -2707,21 +2722,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
                ret = marvell_nand_chip_init(dev, nfc, nand_np);
                if (ret) {
                        of_node_put(nand_np);
-                       return ret;
+                       goto cleanup_chips;
                }
        }
 
        return 0;
-}
 
-static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
-{
-       struct marvell_nand_chip *entry, *temp;
+cleanup_chips:
+       marvell_nand_chips_cleanup(nfc);
 
-       list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
-               nand_release(&entry->chip);
-               list_del(&entry->node);
-       }
+       return ret;
 }
 
 static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
@@ -2854,7 +2864,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
 static int marvell_nfc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *r;
        struct marvell_nfc *nfc;
        int ret;
        int irq;
@@ -2869,8 +2878,7 @@ static int marvell_nfc_probe(struct platform_device *pdev)
        nfc->controller.ops = &marvell_nand_controller_ops;
        INIT_LIST_HEAD(&nfc->chips);
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       nfc->regs = devm_ioremap_resource(dev, r);
+       nfc->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(nfc->regs))
                return PTR_ERR(nfc->regs);
 
index f6fb5c0..3f37647 100644 (file)
@@ -899,6 +899,9 @@ static int meson_nfc_exec_op(struct nand_chip *nand,
        u32 op_id, delay_idle, cmd;
        int i;
 
+       if (check_only)
+               return 0;
+
        meson_nfc_select_chip(nand, op->cs);
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
                instr = &op->instrs[op_id];
@@ -1266,7 +1269,7 @@ meson_nfc_nand_chip_init(struct device *dev,
        nand_set_flash_node(nand, np);
        nand_set_controller_data(nand, nfc);
 
-       nand->options |= NAND_USE_BOUNCE_BUFFER;
+       nand->options |= NAND_USES_DMA;
        mtd = nand_to_mtd(nand);
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = dev;
index a2fcb73..18ecb09 100644 (file)
@@ -805,8 +805,11 @@ static int mpc5121_nfc_remove(struct platform_device *op)
 {
        struct device *dev = &op->dev;
        struct mtd_info *mtd = dev_get_drvdata(dev);
+       int ret;
 
-       nand_release(mtd_to_nand(mtd));
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(mtd_to_nand(mtd));
        mpc5121_nfc_free(dev, mtd);
 
        return 0;
index ef149e8..c1a6e31 100644 (file)
@@ -1380,7 +1380,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
        nand_set_flash_node(nand, np);
        nand_set_controller_data(nand, nfc);
 
-       nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+       nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
        nand->legacy.dev_ready = mtk_nfc_dev_ready;
        nand->legacy.select_chip = mtk_nfc_select_chip;
        nand->legacy.write_byte = mtk_nfc_write_byte;
@@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "mtd parse partition error\n");
-               nand_release(nand);
+               nand_cleanup(nand);
                return ret;
        }
 
@@ -1578,13 +1578,18 @@ release_ecc:
 static int mtk_nfc_remove(struct platform_device *pdev)
 {
        struct mtk_nfc *nfc = platform_get_drvdata(pdev);
-       struct mtk_nfc_nand_chip *chip;
+       struct mtk_nfc_nand_chip *mtk_chip;
+       struct nand_chip *chip;
+       int ret;
 
        while (!list_empty(&nfc->chips)) {
-               chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
-                                       node);
-               nand_release(&chip->nand);
-               list_del(&chip->node);
+               mtk_chip = list_first_entry(&nfc->chips,
+                                           struct mtk_nfc_nand_chip, node);
+               chip = &mtk_chip->nand;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               list_del(&mtk_chip->node);
        }
 
        mtk_ecc_release(nfc->ecc);
index 59554c1..09dacb8 100644 (file)
@@ -1919,8 +1919,12 @@ escan:
 static int mxcnd_remove(struct platform_device *pdev)
 {
        struct mxc_nand_host *host = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &host->nand;
+       int ret;
 
-       nand_release(&host->nand);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        if (host->clk_act)
                clk_disable_unprepare(host->clk);
 
index ed7a4e0..57f3672 100644 (file)
@@ -393,6 +393,9 @@ static int mxic_nfc_exec_op(struct nand_chip *chip,
        int ret = 0;
        unsigned int op_id;
 
+       if (check_only)
+               return 0;
+
        mxic_nfc_cs_enable(nfc);
        init_completion(&nfc->complete);
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -553,8 +556,13 @@ fail:
 static int mxic_nfc_remove(struct platform_device *pdev)
 {
        struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &nfc->chip;
+       int ret;
+
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
-       nand_release(&nfc->chip);
        mxic_nfc_clk_disable(nfc);
        return 0;
 }
index c24e5e2..45124db 100644 (file)
@@ -205,6 +205,56 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
        .free = nand_ooblayout_free_lp_hamming,
 };
 
+static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
+                                      struct mtd_pairing_info *info)
+{
+       int lastpage = (mtd->erasesize / mtd->writesize) - 1;
+       int dist = 3;
+
+       if (page == lastpage)
+               dist = 2;
+
+       if (!page || (page & 1)) {
+               info->group = 0;
+               info->pair = (page + 1) / 2;
+       } else {
+               info->group = 1;
+               info->pair = (page + 1 - dist) / 2;
+       }
+
+       return 0;
+}
+
+static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
+                                       const struct mtd_pairing_info *info)
+{
+       int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
+       int page = info->pair * 2;
+       int dist = 3;
+
+       if (!info->group && !info->pair)
+               return 0;
+
+       if (info->pair == lastpair && info->group)
+               dist = 2;
+
+       if (!info->group)
+               page--;
+       else if (info->pair)
+               page += dist - 1;
+
+       if (page >= mtd->erasesize / mtd->writesize)
+               return -EINVAL;
+
+       return page;
+}
+
+const struct mtd_pairing_scheme dist3_pairing_scheme = {
+       .ngroups = 2,
+       .get_info = nand_pairing_dist3_get_info,
+       .get_wunit = nand_pairing_dist3_get_wunit,
+};
+
 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
 {
        int ret = 0;
@@ -224,6 +274,50 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
        return ret;
 }
 
+/**
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
+ *
+ * Copy bits from one memory region to another (overlap authorized).
+ */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+                      unsigned int src_off, unsigned int nbits)
+{
+       unsigned int tmp, n;
+
+       dst += dst_off / 8;
+       dst_off %= 8;
+       src += src_off / 8;
+       src_off %= 8;
+
+       while (nbits) {
+               n = min3(8 - dst_off, 8 - src_off, nbits);
+
+               tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+               *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+               *dst |= tmp << dst_off;
+
+               dst_off += n;
+               if (dst_off >= 8) {
+                       dst++;
+                       dst_off -= 8;
+               }
+
+               src_off += n;
+               if (src_off >= 8) {
+                       src++;
+                       src_off -= 8;
+               }
+
+               nbits -= n;
+       }
+}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
+
 /**
  * nand_select_target() - Select a NAND target (A.K.A. die)
  * @chip: NAND chip object
@@ -345,6 +439,9 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
 
 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
 {
+       if (chip->options & NAND_NO_BBM_QUIRK)
+               return 0;
+
        if (chip->legacy.block_bad)
                return chip->legacy.block_bad(chip, ofs);
 
@@ -690,7 +787,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
         */
        timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
        do {
-               ret = nand_read_data_op(chip, &status, sizeof(status), true);
+               ret = nand_read_data_op(chip, &status, sizeof(status), true,
+                                       false);
                if (ret)
                        break;
 
@@ -736,8 +834,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
                      unsigned long timeout_ms)
 {
-       /* Wait until R/B pin indicates chip is ready or timeout occurs */
-       timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+
+       /*
+        * Wait until R/B pin indicates chip is ready or timeout occurs.
+        * +1 below is necessary because if we are now in the last fraction
+        * of jiffy and msecs_to_jiffies is 1 then we will wait only that
+        * small jiffy fraction - possibly leading to false timeout.
+        */
+       timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
        do {
                if (gpiod_get_value_cansleep(gpiod))
                        return 0;
@@ -770,7 +874,7 @@ void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
                        u8 status;
 
                        ret = nand_read_data_op(chip, &status, sizeof(status),
-                                               true);
+                                               true, false);
                        if (ret)
                                return;
 
@@ -1868,6 +1972,8 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
  * @buf: buffer used to store the data
  * @len: length of the buffer
  * @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check if the
+ *              controller driver supports it
  *
  * This function does a raw data read on the bus. Usually used after launching
  * another NAND operation like nand_read_page_op().
@@ -1876,7 +1982,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
  * Returns 0 on success, a negative error code otherwise.
  */
 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
-                     bool force_8bit)
+                     bool force_8bit, bool check_only)
 {
        if (!len || !buf)
                return -EINVAL;
@@ -1889,9 +1995,15 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
 
                instrs[0].ctx.data.force_8bit = force_8bit;
 
+               if (check_only)
+                       return nand_check_op(chip, &op);
+
                return nand_exec_op(chip, &op);
        }
 
+       if (check_only)
+               return 0;
+
        if (force_8bit) {
                u8 *p = buf;
                unsigned int i;
@@ -2112,7 +2224,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
        char *prefix = "      ";
        unsigned int i;
 
-       pr_debug("executing subop:\n");
+       pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
 
        for (i = 0; i < ctx->ninstrs; i++) {
                instr = &ctx->instrs[i];
@@ -2176,6 +2288,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
                           const struct nand_operation *op, bool check_only)
 {
        struct nand_op_parser_ctx ctx = {
+               .subop.cs = op->cs,
                .subop.instrs = op->instrs,
                .instrs = op->instrs,
                .ninstrs = op->ninstrs,
@@ -2620,7 +2733,7 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
 
        if (oob_required) {
                ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
-                                       false);
+                                       false, false);
                if (ret)
                        return ret;
        }
@@ -2629,6 +2742,47 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
 }
 EXPORT_SYMBOL(nand_read_page_raw);
 
+/**
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This is a raw page read, ie. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * eventually OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
+ */
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+                                 int oob_required, int page)
+{
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       unsigned int size = mtd->writesize;
+       u8 *read_buf = buf;
+       int ret;
+
+       if (oob_required) {
+               size += mtd->oobsize;
+
+               if (buf != chip->data_buf)
+                       read_buf = nand_get_data_buf(chip);
+       }
+
+       ret = nand_read_page_op(chip, page, 0, read_buf, size);
+       if (ret)
+               return ret;
+
+       if (buf != chip->data_buf)
+               memcpy(buf, read_buf, mtd->writesize);
+
+       return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
+
 /**
  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
  * @chip: nand chip info structure
@@ -2652,7 +2806,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
                return ret;
 
        for (steps = chip->ecc.steps; steps > 0; steps--) {
-               ret = nand_read_data_op(chip, buf, eccsize, false);
+               ret = nand_read_data_op(chip, buf, eccsize, false, false);
                if (ret)
                        return ret;
 
@@ -2660,14 +2814,14 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
 
                if (chip->ecc.prepad) {
                        ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
-                                               false);
+                                               false, false);
                        if (ret)
                                return ret;
 
                        oob += chip->ecc.prepad;
                }
 
-               ret = nand_read_data_op(chip, oob, eccbytes, false);
+               ret = nand_read_data_op(chip, oob, eccbytes, false, false);
                if (ret)
                        return ret;
 
@@ -2675,7 +2829,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
 
                if (chip->ecc.postpad) {
                        ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
-                                               false);
+                                               false, false);
                        if (ret)
                                return ret;
 
@@ -2685,7 +2839,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
 
        size = mtd->oobsize - (oob - chip->oob_poi);
        if (size) {
-               ret = nand_read_data_op(chip, oob, size, false);
+               ret = nand_read_data_op(chip, oob, size, false, false);
                if (ret)
                        return ret;
        }
@@ -2878,14 +3032,15 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
        for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
                chip->ecc.hwctl(chip, NAND_ECC_READ);
 
-               ret = nand_read_data_op(chip, p, eccsize, false);
+               ret = nand_read_data_op(chip, p, eccsize, false, false);
                if (ret)
                        return ret;
 
                chip->ecc.calculate(chip, p, &ecc_calc[i]);
        }
 
-       ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+       ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+                               false);
        if (ret)
                return ret;
 
@@ -2920,76 +3075,6 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
        return max_bitflips;
 }
 
-/**
- * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
- */
-static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
-                                         int oob_required, int page)
-{
-       struct mtd_info *mtd = nand_to_mtd(chip);
-       int i, eccsize = chip->ecc.size, ret;
-       int eccbytes = chip->ecc.bytes;
-       int eccsteps = chip->ecc.steps;
-       uint8_t *p = buf;
-       uint8_t *ecc_code = chip->ecc.code_buf;
-       uint8_t *ecc_calc = chip->ecc.calc_buf;
-       unsigned int max_bitflips = 0;
-
-       /* Read the OOB area first */
-       ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
-       if (ret)
-               return ret;
-
-       ret = nand_read_page_op(chip, page, 0, NULL, 0);
-       if (ret)
-               return ret;
-
-       ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-                                        chip->ecc.total);
-       if (ret)
-               return ret;
-
-       for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-               int stat;
-
-               chip->ecc.hwctl(chip, NAND_ECC_READ);
-
-               ret = nand_read_data_op(chip, p, eccsize, false);
-               if (ret)
-                       return ret;
-
-               chip->ecc.calculate(chip, p, &ecc_calc[i]);
-
-               stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
-               if (stat == -EBADMSG &&
-                   (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-                       /* check for empty pages with bitflips */
-                       stat = nand_check_erased_ecc_chunk(p, eccsize,
-                                               &ecc_code[i], eccbytes,
-                                               NULL, 0,
-                                               chip->ecc.strength);
-               }
-
-               if (stat < 0) {
-                       mtd->ecc_stats.failed++;
-               } else {
-                       mtd->ecc_stats.corrected += stat;
-                       max_bitflips = max_t(unsigned int, max_bitflips, stat);
-               }
-       }
-       return max_bitflips;
-}
-
 /**
  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
  * @chip: nand chip info structure
@@ -3021,13 +3106,13 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
 
                chip->ecc.hwctl(chip, NAND_ECC_READ);
 
-               ret = nand_read_data_op(chip, p, eccsize, false);
+               ret = nand_read_data_op(chip, p, eccsize, false, false);
                if (ret)
                        return ret;
 
                if (chip->ecc.prepad) {
                        ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
-                                               false);
+                                               false, false);
                        if (ret)
                                return ret;
 
@@ -3036,7 +3121,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
 
                chip->ecc.hwctl(chip, NAND_ECC_READSYN);
 
-               ret = nand_read_data_op(chip, oob, eccbytes, false);
+               ret = nand_read_data_op(chip, oob, eccbytes, false, false);
                if (ret)
                        return ret;
 
@@ -3046,7 +3131,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
 
                if (chip->ecc.postpad) {
                        ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
-                                               false);
+                                               false, false);
                        if (ret)
                                return ret;
 
@@ -3074,7 +3159,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
        /* Calculate remaining oob bytes */
        i = mtd->oobsize - (oob - chip->oob_poi);
        if (i) {
-               ret = nand_read_data_op(chip, oob, i, false);
+               ret = nand_read_data_op(chip, oob, i, false, false);
                if (ret)
                        return ret;
        }
@@ -3166,7 +3251,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
        uint32_t max_oobsize = mtd_oobavail(mtd, ops);
 
        uint8_t *bufpoi, *oob, *buf;
-       int use_bufpoi;
+       int use_bounce_buf;
        unsigned int max_bitflips = 0;
        int retry_mode = 0;
        bool ecc_fail = false;
@@ -3184,25 +3269,25 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
        oob_required = oob ? 1 : 0;
 
        while (1) {
-               unsigned int ecc_failures = mtd->ecc_stats.failed;
+               struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
 
                bytes = min(mtd->writesize - col, readlen);
                aligned = (bytes == mtd->writesize);
 
                if (!aligned)
-                       use_bufpoi = 1;
-               else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-                       use_bufpoi = !virt_addr_valid(buf) ||
-                                    !IS_ALIGNED((unsigned long)buf,
-                                                chip->buf_align);
+                       use_bounce_buf = 1;
+               else if (chip->options & NAND_USES_DMA)
+                       use_bounce_buf = !virt_addr_valid(buf) ||
+                                        !IS_ALIGNED((unsigned long)buf,
+                                                    chip->buf_align);
                else
-                       use_bufpoi = 0;
+                       use_bounce_buf = 0;
 
                /* Is the current page in the buffer? */
                if (realpage != chip->pagecache.page || oob) {
-                       bufpoi = use_bufpoi ? chip->data_buf : buf;
+                       bufpoi = use_bounce_buf ? chip->data_buf : buf;
 
-                       if (use_bufpoi && aligned)
+                       if (use_bounce_buf && aligned)
                                pr_debug("%s: using read bounce buffer for buf@%p\n",
                                                 __func__, buf);
 
@@ -3223,16 +3308,19 @@ read_retry:
                                ret = chip->ecc.read_page(chip, bufpoi,
                                                          oob_required, page);
                        if (ret < 0) {
-                               if (use_bufpoi)
+                               if (use_bounce_buf)
                                        /* Invalidate page cache */
                                        chip->pagecache.page = -1;
                                break;
                        }
 
-                       /* Transfer not aligned data */
-                       if (use_bufpoi) {
+                       /*
+                        * Copy back the data in the initial buffer when reading
+                        * partial pages or when a bounce buffer is required.
+                        */
+                       if (use_bounce_buf) {
                                if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
-                                   !(mtd->ecc_stats.failed - ecc_failures) &&
+                                   !(mtd->ecc_stats.failed - ecc_stats.failed) &&
                                    (ops->mode != MTD_OPS_RAW)) {
                                        chip->pagecache.page = realpage;
                                        chip->pagecache.bitflips = ret;
@@ -3240,7 +3328,7 @@ read_retry:
                                        /* Invalidate page cache */
                                        chip->pagecache.page = -1;
                                }
-                               memcpy(buf, chip->data_buf + col, bytes);
+                               memcpy(buf, bufpoi + col, bytes);
                        }
 
                        if (unlikely(oob)) {
@@ -3255,7 +3343,7 @@ read_retry:
 
                        nand_wait_readrdy(chip);
 
-                       if (mtd->ecc_stats.failed - ecc_failures) {
+                       if (mtd->ecc_stats.failed - ecc_stats.failed) {
                                if (retry_mode + 1 < chip->read_retries) {
                                        retry_mode++;
                                        ret = nand_setup_read_retry(chip,
@@ -3263,8 +3351,8 @@ read_retry:
                                        if (ret < 0)
                                                break;
 
-                                       /* Reset failures; retry */
-                                       mtd->ecc_stats.failed = ecc_failures;
+                                       /* Reset ecc_stats; retry */
+                                       mtd->ecc_stats = ecc_stats;
                                        goto read_retry;
                                } else {
                                        /* No more retry modes; real failure */
@@ -3373,7 +3461,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
                        sndrnd = 1;
                toread = min_t(int, length, chunk);
 
-               ret = nand_read_data_op(chip, bufpoi, toread, false);
+               ret = nand_read_data_op(chip, bufpoi, toread, false, false);
                if (ret)
                        return ret;
 
@@ -3381,7 +3469,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
                length -= toread;
        }
        if (length > 0) {
-               ret = nand_read_data_op(chip, bufpoi, length, false);
+               ret = nand_read_data_op(chip, bufpoi, length, false, false);
                if (ret)
                        return ret;
        }
@@ -3633,6 +3721,42 @@ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
 }
 EXPORT_SYMBOL(nand_write_page_raw);
 
+/**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, ie. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * eventually OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then eventually send the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+                                  int oob_required, int page)
+{
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       unsigned int size = mtd->writesize;
+       u8 *write_buf = (u8 *)buf;
+
+       if (oob_required) {
+               size += mtd->oobsize;
+
+               if (buf != chip->data_buf) {
+                       write_buf = nand_get_data_buf(chip);
+                       memcpy(write_buf, buf, mtd->writesize);
+               }
+       }
+
+       return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
+
 /**
  * nand_write_page_raw_syndrome - [INTERN] raw page write function
  * @chip: nand chip info structure
@@ -4012,20 +4136,23 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
        while (1) {
                int bytes = mtd->writesize;
                uint8_t *wbuf = buf;
-               int use_bufpoi;
+               int use_bounce_buf;
                int part_pagewr = (column || writelen < mtd->writesize);
 
                if (part_pagewr)
-                       use_bufpoi = 1;
-               else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-                       use_bufpoi = !virt_addr_valid(buf) ||
-                                    !IS_ALIGNED((unsigned long)buf,
-                                                chip->buf_align);
+                       use_bounce_buf = 1;
+               else if (chip->options & NAND_USES_DMA)
+                       use_bounce_buf = !virt_addr_valid(buf) ||
+                                        !IS_ALIGNED((unsigned long)buf,
+                                                    chip->buf_align);
                else
-                       use_bufpoi = 0;
+                       use_bounce_buf = 0;
 
-               /* Partial page write?, or need to use bounce buffer */
-               if (use_bufpoi) {
+               /*
+                * Copy the data from the initial buffer when doing partial page
+                * writes or when a bounce buffer is required.
+                */
+               if (use_bounce_buf) {
                        pr_debug("%s: using write bounce buffer for buf@%p\n",
                                         __func__, buf);
                        if (part_pagewr)
@@ -4883,7 +5010,6 @@ static const char * const nand_ecc_modes[] = {
        [NAND_ECC_SOFT]         = "soft",
        [NAND_ECC_HW]           = "hw",
        [NAND_ECC_HW_SYNDROME]  = "hw_syndrome",
-       [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
        [NAND_ECC_ON_DIE]       = "on-die",
 };
 
@@ -4896,14 +5022,14 @@ static int of_get_nand_ecc_mode(struct device_node *np)
        if (err < 0)
                return err;
 
-       for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+       for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++)
                if (!strcasecmp(pm, nand_ecc_modes[i]))
                        return i;
 
        /*
         * For backward compatibility we support few obsoleted values that don't
-        * have their mappings into nand_ecc_modes_t anymore (they were merged
-        * with other enums).
+        * have their mappings into the nand_ecc_mode enum anymore (they were
+        * merged with other enums).
         */
        if (!strcasecmp(pm, "soft_bch"))
                return NAND_ECC_SOFT;
@@ -4917,17 +5043,20 @@ static const char * const nand_ecc_algos[] = {
        [NAND_ECC_RS]           = "rs",
 };
 
-static int of_get_nand_ecc_algo(struct device_node *np)
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
 {
+       enum nand_ecc_algo ecc_algo;
        const char *pm;
-       int err, i;
+       int err;
 
        err = of_property_read_string(np, "nand-ecc-algo", &pm);
        if (!err) {
-               for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
-                       if (!strcasecmp(pm, nand_ecc_algos[i]))
-                               return i;
-               return -ENODEV;
+               for (ecc_algo = NAND_ECC_HAMMING;
+                    ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+                    ecc_algo++) {
+                       if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+                               return ecc_algo;
+               }
        }
 
        /*
@@ -4935,15 +5064,14 @@ static int of_get_nand_ecc_algo(struct device_node *np)
         * for some obsoleted values that were specifying ECC algorithm.
         */
        err = of_property_read_string(np, "nand-ecc-mode", &pm);
-       if (err < 0)
-               return err;
-
-       if (!strcasecmp(pm, "soft"))
-               return NAND_ECC_HAMMING;
-       else if (!strcasecmp(pm, "soft_bch"))
-               return NAND_ECC_BCH;
+       if (!err) {
+               if (!strcasecmp(pm, "soft"))
+                       return NAND_ECC_HAMMING;
+               else if (!strcasecmp(pm, "soft_bch"))
+                       return NAND_ECC_BCH;
+       }
 
-       return -ENODEV;
+       return NAND_ECC_UNKNOWN;
 }
 
 static int of_get_nand_ecc_step_size(struct device_node *np)
@@ -4988,7 +5116,8 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
 static int nand_dt_init(struct nand_chip *chip)
 {
        struct device_node *dn = nand_get_flash_node(chip);
-       int ecc_mode, ecc_algo, ecc_strength, ecc_step;
+       enum nand_ecc_algo ecc_algo;
+       int ecc_mode, ecc_strength, ecc_step;
 
        if (!dn)
                return 0;
@@ -5010,7 +5139,7 @@ static int nand_dt_init(struct nand_chip *chip)
        if (ecc_mode >= 0)
                chip->ecc.mode = ecc_mode;
 
-       if (ecc_algo >= 0)
+       if (ecc_algo != NAND_ECC_UNKNOWN)
                chip->ecc.algo = ecc_algo;
 
        if (ecc_strength >= 0)
@@ -5140,8 +5269,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
                ecc->read_page = nand_read_page_swecc;
                ecc->read_subpage = nand_read_subpage;
                ecc->write_page = nand_write_page_swecc;
-               ecc->read_page_raw = nand_read_page_raw;
-               ecc->write_page_raw = nand_write_page_raw;
+               if (!ecc->read_page_raw)
+                       ecc->read_page_raw = nand_read_page_raw;
+               if (!ecc->write_page_raw)
+                       ecc->write_page_raw = nand_write_page_raw;
                ecc->read_oob = nand_read_oob_std;
                ecc->write_oob = nand_write_oob_std;
                if (!ecc->size)
@@ -5163,8 +5294,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
                ecc->read_page = nand_read_page_swecc;
                ecc->read_subpage = nand_read_subpage;
                ecc->write_page = nand_write_page_swecc;
-               ecc->read_page_raw = nand_read_page_raw;
-               ecc->write_page_raw = nand_write_page_raw;
+               if (!ecc->read_page_raw)
+                       ecc->read_page_raw = nand_read_page_raw;
+               if (!ecc->write_page_raw)
+                       ecc->write_page_raw = nand_write_page_raw;
                ecc->read_oob = nand_read_oob_std;
                ecc->write_oob = nand_write_oob_std;
 
@@ -5628,16 +5761,6 @@ static int nand_scan_tail(struct nand_chip *chip)
         */
 
        switch (ecc->mode) {
-       case NAND_ECC_HW_OOB_FIRST:
-               /* Similar to NAND_ECC_HW, but a separate read_page handle */
-               if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
-                       WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
-                       ret = -EINVAL;
-                       goto err_nand_manuf_cleanup;
-               }
-               if (!ecc->read_page)
-                       ecc->read_page = nand_read_page_hwecc_oob_first;
-               fallthrough;
        case NAND_ECC_HW:
                /* Use standard hwecc read page function? */
                if (!ecc->read_page)
@@ -5781,8 +5904,10 @@ static int nand_scan_tail(struct nand_chip *chip)
 
        /* ECC sanity check: warn if it's too weak */
        if (!nand_ecc_strength_good(chip))
-               pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
-                       mtd->name);
+               pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+                       mtd->name, chip->ecc.strength, chip->ecc.size,
+                       chip->base.eccreq.strength,
+                       chip->base.eccreq.step_size);
 
        /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
        if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
@@ -5975,18 +6100,6 @@ void nand_cleanup(struct nand_chip *chip)
 
 EXPORT_SYMBOL_GPL(nand_cleanup);
 
-/**
- * nand_release - [NAND Interface] Unregister the MTD device and free resources
- *               held by the NAND device
- * @chip: NAND chip object
- */
-void nand_release(struct nand_chip *chip)
-{
-       mtd_device_unregister(nand_to_mtd(chip));
-       nand_cleanup(chip);
-}
-EXPORT_SYMBOL_GPL(nand_release);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
index 1752731..d5af8c5 100644 (file)
@@ -41,7 +41,7 @@ int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
        unsigned int i;
 
        memset(code, 0, chip->ecc.bytes);
-       encode_bch(nbc->bch, buf, chip->ecc.size, code);
+       bch_encode(nbc->bch, buf, chip->ecc.size, code);
 
        /* apply mask so that an erased page is a valid codeword */
        for (i = 0; i < chip->ecc.bytes; i++)
@@ -67,7 +67,7 @@ int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
        unsigned int *errloc = nbc->errloc;
        int i, count;
 
-       count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+       count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
                           NULL, errloc);
        if (count > 0) {
                for (i = 0; i < count; i++) {
@@ -130,7 +130,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
        if (!nbc)
                goto fail;
 
-       nbc->bch = init_bch(m, t, 0);
+       nbc->bch = bch_init(m, t, 0, false);
        if (!nbc->bch)
                goto fail;
 
@@ -182,7 +182,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
                goto fail;
 
        memset(erased_page, 0xff, eccsize);
-       encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+       bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask);
        kfree(erased_page);
 
        for (i = 0; i < eccbytes; i++)
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(nand_bch_init);
 void nand_bch_free(struct nand_bch_control *nbc)
 {
        if (nbc) {
-               free_bch(nbc->bch);
+               bch_free(nbc->bch);
                kfree(nbc->errloc);
                kfree(nbc->eccmask);
                kfree(nbc);
index 9b540e7..b15c42f 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "internals.h"
 
+#define JEDEC_PARAM_PAGES 3
+
 /*
  * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
  */
@@ -25,9 +27,11 @@ int nand_jedec_detect(struct nand_chip *chip)
        struct nand_memory_organization *memorg;
        struct nand_jedec_params *p;
        struct jedec_ecc_info *ecc;
+       bool use_datain = false;
        int jedec_version = 0;
        char id[5];
        int i, val, ret;
+       u16 crc;
 
        memorg = nanddev_get_memorg(&chip->base);
 
@@ -41,25 +45,31 @@ int nand_jedec_detect(struct nand_chip *chip)
        if (!p)
                return -ENOMEM;
 
-       ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
-       if (ret) {
-               ret = 0;
-               goto free_jedec_param_page;
-       }
-
-       for (i = 0; i < 3; i++) {
-               ret = nand_read_data_op(chip, p, sizeof(*p), true);
+       if (!nand_has_exec_op(chip) ||
+           !nand_read_data_op(chip, p, sizeof(*p), true, true))
+               use_datain = true;
+
+       for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+               if (!i)
+                       ret = nand_read_param_page_op(chip, 0x40, p,
+                                                     sizeof(*p));
+               else if (use_datain)
+                       ret = nand_read_data_op(chip, p, sizeof(*p), true,
+                                               false);
+               else
+                       ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+                                                        p, sizeof(*p), true);
                if (ret) {
                        ret = 0;
                        goto free_jedec_param_page;
                }
 
-               if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
-                               le16_to_cpu(p->crc))
+               crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+               if (crc == le16_to_cpu(p->crc))
                        break;
        }
 
-       if (i == 3) {
+       if (i == JEDEC_PARAM_PAGES) {
                pr_err("Could not find valid JEDEC parameter page; aborting\n");
                goto free_jedec_param_page;
        }
index f91e92e..d64791c 100644 (file)
@@ -225,7 +225,8 @@ static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
        do {
                u8 status;
 
-               ret = nand_read_data_op(chip, &status, sizeof(status), true);
+               ret = nand_read_data_op(chip, &status, sizeof(status), true,
+                                       false);
                if (ret)
                        return;
 
@@ -552,7 +553,8 @@ static int nand_wait(struct nand_chip *chip)
                                        break;
                        } else {
                                ret = nand_read_data_op(chip, &status,
-                                                       sizeof(status), true);
+                                                       sizeof(status), true,
+                                                       false);
                                if (ret)
                                        return ret;
 
@@ -563,7 +565,7 @@ static int nand_wait(struct nand_chip *chip)
                } while (time_before(jiffies, timeo));
        }
 
-       ret = nand_read_data_op(chip, &status, sizeof(status), true);
+       ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
        if (ret)
                return ret;
 
index 5665403..3589b4f 100644 (file)
@@ -192,6 +192,7 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
        struct micron_nand *micron = nand_get_manufacturer_data(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        unsigned int step, max_bitflips = 0;
+       bool use_datain = false;
        int ret;
 
        if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
@@ -211,8 +212,27 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
         * in non-raw mode, even if the user did not request those bytes.
         */
        if (!oob_required) {
-               ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
-                                       false);
+               /*
+                * We first check which operation is supported by the controller
+                * before running it. This trick makes it possible to support
+                * all controllers, even the most constrained ones, with
+                * almost no performance hit.
+                *
+                * TODO: could be enhanced to avoid repeating the same check
+                * over and over in the fast path.
+                */
+               if (!nand_has_exec_op(chip) ||
+                   !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+                                      true))
+                       use_datain = true;
+
+               if (use_datain)
+                       ret = nand_read_data_op(chip, chip->oob_poi,
+                                               mtd->oobsize, false, false);
+               else
+                       ret = nand_change_read_column_op(chip, mtd->writesize,
+                                                        chip->oob_poi,
+                                                        mtd->oobsize, false);
                if (ret)
                        return ret;
        }
@@ -285,6 +305,7 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
                                 int oob_required, int page)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
+       bool use_datain = false;
        u8 status;
        int ret, max_bitflips = 0;
 
@@ -300,14 +321,36 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
        if (ret)
                goto out;
 
-       ret = nand_exit_status_op(chip);
-       if (ret)
-               goto out;
+       /*
+        * We first check which operation is supported by the controller before
+        * running it. This trick makes it possible to support all controllers,
+        * even the most constrained ones, with almost no performance hit.
+        *
+        * TODO: could be enhanced to avoid repeating the same check over and
+        * over in the fast path.
+        */
+       if (!nand_has_exec_op(chip) ||
+           !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+               use_datain = true;
 
-       ret = nand_read_data_op(chip, buf, mtd->writesize, false);
-       if (!ret && oob_required)
-               ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+       if (use_datain) {
+               ret = nand_exit_status_op(chip);
+               if (ret)
+                       goto out;
+
+               ret = nand_read_data_op(chip, buf, mtd->writesize, false,
                                        false);
+               if (!ret && oob_required)
+                       ret = nand_read_data_op(chip, chip->oob_poi,
+                                               mtd->oobsize, false, false);
+       } else {
+               ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+                                                false);
+               if (!ret && oob_required)
+                       ret = nand_change_read_column_op(chip, mtd->writesize,
+                                                        chip->oob_poi,
+                                                        mtd->oobsize, false);
+       }
 
        if (chip->ecc.strength == 4)
                max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
@@ -508,8 +551,10 @@ static int micron_nand_init(struct nand_chip *chip)
                        chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
                        chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
                } else {
-                       chip->ecc.read_page_raw = nand_read_page_raw;
-                       chip->ecc.write_page_raw = nand_write_page_raw;
+                       if (!chip->ecc.read_page_raw)
+                               chip->ecc.read_page_raw = nand_read_page_raw;
+                       if (!chip->ecc.write_page_raw)
+                               chip->ecc.write_page_raw = nand_write_page_raw;
                }
        }
 
index 0b879bd..be34566 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "internals.h"
 
+#define ONFI_PARAM_PAGES 3
+
 u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
 {
        int i;
@@ -45,12 +47,10 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
        if (!ep)
                return -ENOMEM;
 
-       /* Send our own NAND_CMD_PARAM. */
-       ret = nand_read_param_page_op(chip, 0, NULL, 0);
-       if (ret)
-               goto ext_out;
-
-       /* Use the Change Read Column command to skip the ONFI param pages. */
+       /*
+        * Use the Change Read Column command to skip the ONFI param pages and
+        * ensure we read at the right location.
+        */
        ret = nand_change_read_column_op(chip,
                                         sizeof(*p) * p->num_of_param_pages,
                                         ep, len, true);
@@ -141,11 +141,13 @@ int nand_onfi_detect(struct nand_chip *chip)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct nand_memory_organization *memorg;
-       struct nand_onfi_params *p;
+       struct nand_onfi_params *p = NULL, *pbuf;
        struct onfi_params *onfi;
+       bool use_datain = false;
        int onfi_version = 0;
        char id[4];
        int i, ret, val;
+       u16 crc;
 
        memorg = nanddev_get_memorg(&chip->base);
 
@@ -155,43 +157,54 @@ int nand_onfi_detect(struct nand_chip *chip)
                return 0;
 
        /* ONFI chip: allocate a buffer to hold its parameter page */
-       p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
-       if (!p)
+       pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+       if (!pbuf)
                return -ENOMEM;
 
-       ret = nand_read_param_page_op(chip, 0, NULL, 0);
-       if (ret) {
-               ret = 0;
-               goto free_onfi_param_page;
-       }
-
-       for (i = 0; i < 3; i++) {
-               ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
+       if (!nand_has_exec_op(chip) ||
+           !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true))
+               use_datain = true;
+
+       for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+               if (!i)
+                       ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+                                                     sizeof(*pbuf));
+               else if (use_datain)
+                       ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+                                               true, false);
+               else
+                       ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+                                                        &pbuf[i], sizeof(*pbuf),
+                                                        true);
                if (ret) {
                        ret = 0;
                        goto free_onfi_param_page;
                }
 
-               if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
-                               le16_to_cpu(p->crc)) {
-                       if (i)
-                               memcpy(p, &p[i], sizeof(*p));
+               crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+               if (crc == le16_to_cpu(pbuf[i].crc)) {
+                       p = &pbuf[i];
                        break;
                }
        }
 
-       if (i == 3) {
-               const void *srcbufs[3] = {p, p + 1, p + 2};
+       if (i == ONFI_PARAM_PAGES) {
+               const void *srcbufs[ONFI_PARAM_PAGES];
+               unsigned int j;
+
+               for (j = 0; j < ONFI_PARAM_PAGES; j++)
+                       srcbufs[j] = pbuf + j;
 
                pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
-               nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
-                                      sizeof(*p));
+               nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+                                      sizeof(*pbuf));
 
-               if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
-                               le16_to_cpu(p->crc)) {
+               crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+               if (crc != le16_to_cpu(pbuf->crc)) {
                        pr_err("ONFI parameter recovery failed, aborting\n");
                        goto free_onfi_param_page;
                }
+               p = pbuf;
        }
 
        if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
@@ -299,14 +312,14 @@ int nand_onfi_detect(struct nand_chip *chip)
        chip->parameters.onfi = onfi;
 
        /* Identification done, free the full ONFI parameter page and exit */
-       kfree(p);
+       kfree(pbuf);
 
        return 1;
 
 free_model:
        kfree(chip->parameters.model);
 free_onfi_param_page:
-       kfree(p);
+       kfree(pbuf);
 
        return ret;
 }
index f64b06a..36d21be 100644 (file)
@@ -16,6 +16,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 0 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 0,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -58,6 +59,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 1 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 1,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -100,6 +102,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 2 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 2,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -142,6 +145,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 3 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 3,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -184,6 +188,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 4 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 4,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -226,6 +231,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        /* Mode 5 */
        {
                .type = NAND_SDR_IFACE,
+               .timings.mode = 5,
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
@@ -314,10 +320,9 @@ int onfi_fill_data_interface(struct nand_chip *chip,
                /* microseconds -> picoseconds */
                timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
                timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
-               timings->tR_max = 1000000ULL * 200000000ULL;
 
-               /* nanoseconds -> picoseconds */
-               timings->tCCS_min = 1000UL * 500000;
+               timings->tR_max = 200000000;
+               timings->tCCS_min = 500000;
        }
 
        return 0;
index f3dcd69..ae06990 100644 (file)
@@ -194,6 +194,17 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
        }
 }
 
+static int tc58teg5dclta00_init(struct nand_chip *chip)
+{
+       struct mtd_info *mtd = nand_to_mtd(chip);
+
+       chip->onfi_timing_mode_default = 5;
+       chip->options |= NAND_NEED_SCRAMBLING;
+       mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+       return 0;
+}
+
 static int toshiba_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
@@ -204,6 +215,9 @@ static int toshiba_nand_init(struct nand_chip *chip)
            chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
                toshiba_nand_benand_init(chip);
 
+       if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+               tc58teg5dclta00_init(chip);
+
        return 0;
 }
 
index 1de03bb..0a5cb77 100644 (file)
@@ -353,6 +353,9 @@ struct nandsim {
        void *file_buf;
        struct page *held_pages[NS_MAX_HELD_PAGES];
        int held_cnt;
+
+       /* debugfs entry */
+       struct dentry *dent;
 };
 
 /*
@@ -432,7 +435,7 @@ static unsigned long total_wear = 0;
 /* MTD structure for NAND controller */
 static struct mtd_info *nsmtd;
 
-static int nandsim_show(struct seq_file *m, void *private)
+static int ns_show(struct seq_file *m, void *private)
 {
        unsigned long wmin = -1, wmax = 0, avg;
        unsigned long deciles[10], decile_max[10], tot = 0;
@@ -483,19 +486,18 @@ static int nandsim_show(struct seq_file *m, void *private)
 
        return 0;
 }
-DEFINE_SHOW_ATTRIBUTE(nandsim);
+DEFINE_SHOW_ATTRIBUTE(ns);
 
 /**
- * nandsim_debugfs_create - initialize debugfs
- * @dev: nandsim device description object
+ * ns_debugfs_create - initialize debugfs
+ * @ns: nandsim device description object
  *
  * This function creates all debugfs files for UBI device @ubi. Returns zero in
  * case of success and a negative error code in case of failure.
  */
-static int nandsim_debugfs_create(struct nandsim *dev)
+static int ns_debugfs_create(struct nandsim *ns)
 {
        struct dentry *root = nsmtd->dbg.dfs_dir;
-       struct dentry *dent;
 
        /*
         * Just skip debugfs initialization when the debugfs directory is
@@ -508,9 +510,9 @@ static int nandsim_debugfs_create(struct nandsim *dev)
                return 0;
        }
 
-       dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
-                                  root, dev, &nandsim_fops);
-       if (IS_ERR_OR_NULL(dent)) {
+       ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
+                                      &ns_fops);
+       if (IS_ERR_OR_NULL(ns->dent)) {
                NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
                return -1;
        }
@@ -518,13 +520,18 @@ static int nandsim_debugfs_create(struct nandsim *dev)
        return 0;
 }
 
+static void ns_debugfs_remove(struct nandsim *ns)
+{
+       debugfs_remove_recursive(ns->dent);
+}
+
 /*
  * Allocate array of page pointers, create slab allocation for an array
  * and initialize the array by NULL pointers.
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
-static int __init alloc_device(struct nandsim *ns)
+static int __init ns_alloc_device(struct nandsim *ns)
 {
        struct file *cfile;
        int i, err;
@@ -536,12 +543,12 @@ static int __init alloc_device(struct nandsim *ns)
                if (!(cfile->f_mode & FMODE_CAN_READ)) {
                        NS_ERR("alloc_device: cache file not readable\n");
                        err = -EINVAL;
-                       goto err_close;
+                       goto err_close_filp;
                }
                if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
                        NS_ERR("alloc_device: cache file not writeable\n");
                        err = -EINVAL;
-                       goto err_close;
+                       goto err_close_filp;
                }
                ns->pages_written =
                        vzalloc(array_size(sizeof(unsigned long),
@@ -549,16 +556,24 @@ static int __init alloc_device(struct nandsim *ns)
                if (!ns->pages_written) {
                        NS_ERR("alloc_device: unable to allocate pages written array\n");
                        err = -ENOMEM;
-                       goto err_close;
+                       goto err_close_filp;
                }
                ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
                if (!ns->file_buf) {
                        NS_ERR("alloc_device: unable to allocate file buf\n");
                        err = -ENOMEM;
-                       goto err_free;
+                       goto err_free_pw;
                }
                ns->cfile = cfile;
+
                return 0;
+
+err_free_pw:
+               vfree(ns->pages_written);
+err_close_filp:
+               filp_close(cfile, NULL);
+
+               return err;
        }
 
        ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
@@ -573,22 +588,22 @@ static int __init alloc_device(struct nandsim *ns)
                                                ns->geom.pgszoob, 0, 0, NULL);
        if (!ns->nand_pages_slab) {
                NS_ERR("cache_create: unable to create kmem_cache\n");
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto err_free_pg;
        }
 
        return 0;
 
-err_free:
-       vfree(ns->pages_written);
-err_close:
-       filp_close(cfile, NULL);
+err_free_pg:
+       vfree(ns->pages);
+
        return err;
 }
 
 /*
  * Free any allocated pages, and free the array of page pointers.
  */
-static void free_device(struct nandsim *ns)
+static void ns_free_device(struct nandsim *ns)
 {
        int i;
 
@@ -610,7 +625,7 @@ static void free_device(struct nandsim *ns)
        }
 }
 
-static char __init *get_partition_name(int i)
+static char __init *ns_get_partition_name(int i)
 {
        return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
 }
@@ -620,7 +635,7 @@ static char __init *get_partition_name(int i)
  *
  * RETURNS: 0 if success, -ERRNO if failure.
  */
-static int __init init_nandsim(struct mtd_info *mtd)
+static int __init ns_init(struct mtd_info *mtd)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nandsim   *ns   = nand_get_controller_data(chip);
@@ -693,7 +708,7 @@ static int __init init_nandsim(struct mtd_info *mtd)
                        NS_ERR("bad partition size.\n");
                        return -EINVAL;
                }
-               ns->partitions[i].name   = get_partition_name(i);
+               ns->partitions[i].name = ns_get_partition_name(i);
                if (!ns->partitions[i].name) {
                        NS_ERR("unable to allocate memory.\n");
                        return -ENOMEM;
@@ -707,12 +722,14 @@ static int __init init_nandsim(struct mtd_info *mtd)
        if (remains) {
                if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
                        NS_ERR("too many partitions.\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto free_partition_names;
                }
-               ns->partitions[i].name   = get_partition_name(i);
+               ns->partitions[i].name = ns_get_partition_name(i);
                if (!ns->partitions[i].name) {
                        NS_ERR("unable to allocate memory.\n");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto free_partition_names;
                }
                ns->partitions[i].offset = next_offset;
                ns->partitions[i].size   = remains;
@@ -739,33 +756,48 @@ static int __init init_nandsim(struct mtd_info *mtd)
        printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
        printk("options: %#x\n",                ns->options);
 
-       if ((ret = alloc_device(ns)) != 0)
-               return ret;
+       ret = ns_alloc_device(ns);
+       if (ret)
+               goto free_partition_names;
 
        /* Allocate / initialize the internal buffer */
        ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
        if (!ns->buf.byte) {
                NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
                        ns->geom.pgszoob);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_device;
        }
        memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
 
        return 0;
+
+free_device:
+       ns_free_device(ns);
+free_partition_names:
+       for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+               kfree(ns->partitions[i].name);
+
+       return ret;
 }
 
 /*
  * Free the nandsim structure.
  */
-static void free_nandsim(struct nandsim *ns)
+static void ns_free(struct nandsim *ns)
 {
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+               kfree(ns->partitions[i].name);
+
        kfree(ns->buf.byte);
-       free_device(ns);
+       ns_free_device(ns);
 
        return;
 }
 
-static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
 {
        char *w;
        int zero_ok;
@@ -793,7 +825,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
        return 0;
 }
 
-static int parse_weakblocks(void)
+static int ns_parse_weakblocks(void)
 {
        char *w;
        int zero_ok;
@@ -830,7 +862,7 @@ static int parse_weakblocks(void)
        return 0;
 }
 
-static int erase_error(unsigned int erase_block_no)
+static int ns_erase_error(unsigned int erase_block_no)
 {
        struct weak_block *wb;
 
@@ -844,7 +876,7 @@ static int erase_error(unsigned int erase_block_no)
        return 0;
 }
 
-static int parse_weakpages(void)
+static int ns_parse_weakpages(void)
 {
        char *w;
        int zero_ok;
@@ -881,7 +913,7 @@ static int parse_weakpages(void)
        return 0;
 }
 
-static int write_error(unsigned int page_no)
+static int ns_write_error(unsigned int page_no)
 {
        struct weak_page *wp;
 
@@ -895,7 +927,7 @@ static int write_error(unsigned int page_no)
        return 0;
 }
 
-static int parse_gravepages(void)
+static int ns_parse_gravepages(void)
 {
        char *g;
        int zero_ok;
@@ -932,7 +964,7 @@ static int parse_gravepages(void)
        return 0;
 }
 
-static int read_error(unsigned int page_no)
+static int ns_read_error(unsigned int page_no)
 {
        struct grave_page *gp;
 
@@ -946,25 +978,7 @@ static int read_error(unsigned int page_no)
        return 0;
 }
 
-static void free_lists(void)
-{
-       struct list_head *pos, *n;
-       list_for_each_safe(pos, n, &weak_blocks) {
-               list_del(pos);
-               kfree(list_entry(pos, struct weak_block, list));
-       }
-       list_for_each_safe(pos, n, &weak_pages) {
-               list_del(pos);
-               kfree(list_entry(pos, struct weak_page, list));
-       }
-       list_for_each_safe(pos, n, &grave_pages) {
-               list_del(pos);
-               kfree(list_entry(pos, struct grave_page, list));
-       }
-       kfree(erase_block_wear);
-}
-
-static int setup_wear_reporting(struct mtd_info *mtd)
+static int ns_setup_wear_reporting(struct mtd_info *mtd)
 {
        size_t mem;
 
@@ -982,7 +996,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
        return 0;
 }
 
-static void update_wear(unsigned int erase_block_no)
+static void ns_update_wear(unsigned int erase_block_no)
 {
        if (!erase_block_wear)
                return;
@@ -1001,7 +1015,7 @@ static void update_wear(unsigned int erase_block_no)
 /*
  * Returns the string representation of 'state' state.
  */
-static char *get_state_name(uint32_t state)
+static char *ns_get_state_name(uint32_t state)
 {
        switch (NS_STATE(state)) {
                case STATE_CMD_READ0:
@@ -1061,7 +1075,7 @@ static char *get_state_name(uint32_t state)
  *
  * RETURNS: 1 if wrong command, 0 if right.
  */
-static int check_command(int cmd)
+static int ns_check_command(int cmd)
 {
        switch (cmd) {
 
@@ -1088,7 +1102,7 @@ static int check_command(int cmd)
 /*
  * Returns state after command is accepted by command number.
  */
-static uint32_t get_state_by_command(unsigned command)
+static uint32_t ns_get_state_by_command(unsigned command)
 {
        switch (command) {
                case NAND_CMD_READ0:
@@ -1126,7 +1140,7 @@ static uint32_t get_state_by_command(unsigned command)
 /*
  * Move an address byte to the correspondent internal register.
  */
-static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
 {
        uint byte = (uint)bt;
 
@@ -1144,9 +1158,10 @@ static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
 /*
  * Switch to STATE_READY state.
  */
-static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
 {
-       NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+       NS_DBG("switch_to_ready_state: switch to %s state\n",
+              ns_get_state_name(STATE_READY));
 
        ns->state       = STATE_READY;
        ns->nxstate     = STATE_UNKNOWN;
@@ -1203,7 +1218,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
  *          -1 - several matches.
  *           0 - operation is found.
  */
-static int find_operation(struct nandsim *ns, uint32_t flag)
+static int ns_find_operation(struct nandsim *ns, uint32_t flag)
 {
        int opsfound = 0;
        int i, j, idx = 0;
@@ -1256,7 +1271,8 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
                ns->state = ns->op[ns->stateidx];
                ns->nxstate = ns->op[ns->stateidx + 1];
                NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
-                               idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+                      idx, ns_get_state_name(ns->state),
+                      ns_get_state_name(ns->nxstate));
                return 0;
        }
 
@@ -1264,13 +1280,13 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
                /* Nothing was found. Try to ignore previous commands (if any) and search again */
                if (ns->npstates != 0) {
                        NS_DBG("find_operation: no operation found, try again with state %s\n",
-                                       get_state_name(ns->state));
+                              ns_get_state_name(ns->state));
                        ns->npstates = 0;
-                       return find_operation(ns, 0);
+                       return ns_find_operation(ns, 0);
 
                }
                NS_DBG("find_operation: no operations found\n");
-               switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                return -2;
        }
 
@@ -1287,7 +1303,7 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
        return -1;
 }
 
-static void put_pages(struct nandsim *ns)
+static void ns_put_pages(struct nandsim *ns)
 {
        int i;
 
@@ -1296,7 +1312,8 @@ static void put_pages(struct nandsim *ns)
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */
-static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
+                       loff_t pos)
 {
        pgoff_t index, start_index, end_index;
        struct page *page;
@@ -1316,7 +1333,7 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
                                page = find_or_create_page(mapping, index, GFP_NOFS);
                        }
                        if (page == NULL) {
-                               put_pages(ns);
+                               ns_put_pages(ns);
                                return -ENOMEM;
                        }
                        unlock_page(page);
@@ -1326,35 +1343,37 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
        return 0;
 }
 
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
+                           size_t count, loff_t pos)
 {
        ssize_t tx;
        int err;
        unsigned int noreclaim_flag;
 
-       err = get_pages(ns, file, count, pos);
+       err = ns_get_pages(ns, file, count, pos);
        if (err)
                return err;
        noreclaim_flag = memalloc_noreclaim_save();
        tx = kernel_read(file, buf, count, &pos);
        memalloc_noreclaim_restore(noreclaim_flag);
-       put_pages(ns);
+       ns_put_pages(ns);
        return tx;
 }
 
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
+                            size_t count, loff_t pos)
 {
        ssize_t tx;
        int err;
        unsigned int noreclaim_flag;
 
-       err = get_pages(ns, file, count, pos);
+       err = ns_get_pages(ns, file, count, pos);
        if (err)
                return err;
        noreclaim_flag = memalloc_noreclaim_save();
        tx = kernel_write(file, buf, count, &pos);
        memalloc_noreclaim_restore(noreclaim_flag);
-       put_pages(ns);
+       ns_put_pages(ns);
        return tx;
 }
 
@@ -1374,11 +1393,11 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
        return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
 }
 
-static int do_read_error(struct nandsim *ns, int num)
+static int ns_do_read_error(struct nandsim *ns, int num)
 {
        unsigned int page_no = ns->regs.row;
 
-       if (read_error(page_no)) {
+       if (ns_read_error(page_no)) {
                prandom_bytes(ns->buf.byte, num);
                NS_WARN("simulating read error in page %u\n", page_no);
                return 1;
@@ -1386,7 +1405,7 @@ static int do_read_error(struct nandsim *ns, int num)
        return 0;
 }
 
-static void do_bit_flips(struct nandsim *ns, int num)
+static void ns_do_bit_flips(struct nandsim *ns, int num)
 {
        if (bitflips && prandom_u32() < (1 << 22)) {
                int flips = 1;
@@ -1406,7 +1425,7 @@ static void do_bit_flips(struct nandsim *ns, int num)
 /*
  * Fill the NAND buffer with data read from the specified page.
  */
-static void read_page(struct nandsim *ns, int num)
+static void ns_read_page(struct nandsim *ns, int num)
 {
        union ns_mem *mypage;
 
@@ -1420,15 +1439,16 @@ static void read_page(struct nandsim *ns, int num)
 
                        NS_DBG("read_page: page %d written, reading from %d\n",
                                ns->regs.row, ns->regs.column + ns->regs.off);
-                       if (do_read_error(ns, num))
+                       if (ns_do_read_error(ns, num))
                                return;
                        pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
-                       tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
+                       tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
+                                         pos);
                        if (tx != num) {
                                NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return;
                        }
-                       do_bit_flips(ns, num);
+                       ns_do_bit_flips(ns, num);
                }
                return;
        }
@@ -1440,17 +1460,17 @@ static void read_page(struct nandsim *ns, int num)
        } else {
                NS_DBG("read_page: page %d allocated, reading from %d\n",
                        ns->regs.row, ns->regs.column + ns->regs.off);
-               if (do_read_error(ns, num))
+               if (ns_do_read_error(ns, num))
                        return;
                memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
-               do_bit_flips(ns, num);
+               ns_do_bit_flips(ns, num);
        }
 }
 
 /*
  * Erase all pages in the specified sector.
  */
-static void erase_sector(struct nandsim *ns)
+static void ns_erase_sector(struct nandsim *ns)
 {
        union ns_mem *mypage;
        int i;
@@ -1478,7 +1498,7 @@ static void erase_sector(struct nandsim *ns)
 /*
  * Program the specified page with the contents from the NAND buffer.
  */
-static int prog_page(struct nandsim *ns, int num)
+static int ns_prog_page(struct nandsim *ns, int num)
 {
        int i;
        union ns_mem *mypage;
@@ -1497,7 +1517,7 @@ static int prog_page(struct nandsim *ns, int num)
                        memset(ns->file_buf, 0xff, ns->geom.pgszoob);
                } else {
                        all = 0;
-                       tx = read_file(ns, ns->cfile, pg_off, num, off);
+                       tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
                        if (tx != num) {
                                NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
@@ -1507,14 +1527,15 @@ static int prog_page(struct nandsim *ns, int num)
                        pg_off[i] &= ns->buf.byte[i];
                if (all) {
                        loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
-                       tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
+                       tx = ns_write_file(ns, ns->cfile, ns->file_buf,
+                                          ns->geom.pgszoob, pos);
                        if (tx != ns->geom.pgszoob) {
                                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
                        }
                        __set_bit(ns->regs.row, ns->pages_written);
                } else {
-                       tx = write_file(ns, ns->cfile, pg_off, num, off);
+                       tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
                        if (tx != num) {
                                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
@@ -1552,7 +1573,7 @@ static int prog_page(struct nandsim *ns, int num)
  *
  * RETURNS: 0 if success, -1 if error.
  */
-static int do_state_action(struct nandsim *ns, uint32_t action)
+static int ns_do_state_action(struct nandsim *ns, uint32_t action)
 {
        int num;
        int busdiv = ns->busw == 8 ? 1 : 2;
@@ -1579,7 +1600,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                        break;
                }
                num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
-               read_page(ns, num);
+               ns_read_page(ns, num);
 
                NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
                        num, NS_RAW_OFFSET(ns) + ns->regs.off);
@@ -1622,14 +1643,14 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                                ns->regs.row, NS_RAW_OFFSET(ns));
                NS_LOG("erase sector %u\n", erase_block_no);
 
-               erase_sector(ns);
+               ns_erase_sector(ns);
 
                NS_MDELAY(erase_delay);
 
                if (erase_block_wear)
-                       update_wear(erase_block_no);
+                       ns_update_wear(erase_block_no);
 
-               if (erase_error(erase_block_no)) {
+               if (ns_erase_error(erase_block_no)) {
                        NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
                        return -1;
                }
@@ -1653,7 +1674,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                        return -1;
                }
 
-               if (prog_page(ns, num) == -1)
+               if (ns_prog_page(ns, num) == -1)
                        return -1;
 
                page_no = ns->regs.row;
@@ -1665,7 +1686,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                NS_UDELAY(programm_delay);
                NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
 
-               if (write_error(page_no)) {
+               if (ns_write_error(page_no)) {
                        NS_WARN("simulating write failure in page %u\n", page_no);
                        return -1;
                }
@@ -1702,7 +1723,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
 /*
  * Switch simulator's state.
  */
-static void switch_state(struct nandsim *ns)
+static void ns_switch_state(struct nandsim *ns)
 {
        if (ns->op) {
                /*
@@ -1716,11 +1737,13 @@ static void switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: operation is known, switch to the next state, "
                        "state: %s, nxstate: %s\n",
-                       get_state_name(ns->state), get_state_name(ns->nxstate));
+                      ns_get_state_name(ns->state),
+                      ns_get_state_name(ns->nxstate));
 
                /* See, whether we need to do some action */
-               if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               if ((ns->state & ACTION_MASK) &&
+                   ns_do_state_action(ns, ns->state) < 0) {
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                        return;
                }
 
@@ -1734,15 +1757,16 @@ static void switch_state(struct nandsim *ns)
                 *  The only event causing the switch_state function to
                 *  be called with yet unknown operation is new command.
                 */
-               ns->state = get_state_by_command(ns->regs.command);
+               ns->state = ns_get_state_by_command(ns->regs.command);
 
                NS_DBG("switch_state: operation is unknown, try to find it\n");
 
-               if (find_operation(ns, 0) != 0)
+               if (ns_find_operation(ns, 0) != 0)
                        return;
 
-               if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               if ((ns->state & ACTION_MASK) &&
+                   ns_do_state_action(ns, ns->state) < 0) {
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                        return;
                }
        }
@@ -1770,7 +1794,7 @@ static void switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
 
-               switch_to_ready_state(ns, status);
+               ns_switch_to_ready_state(ns, status);
 
                return;
        } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
@@ -1784,7 +1808,8 @@ static void switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: the next state is data I/O, switch, "
                        "state: %s, nxstate: %s\n",
-                       get_state_name(ns->state), get_state_name(ns->nxstate));
+                      ns_get_state_name(ns->state),
+                      ns_get_state_name(ns->nxstate));
 
                /*
                 * Set the internal register to the count of bytes which
@@ -1862,8 +1887,8 @@ static u_char ns_nand_read_byte(struct nand_chip *chip)
                return outb;
        }
        if (!(ns->state & STATE_DATAOUT_MASK)) {
-               NS_WARN("read_byte: unexpected data output cycle, state is %s "
-                       "return %#x\n", get_state_name(ns->state), (uint)outb);
+               NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
+                       ns_get_state_name(ns->state), (uint)outb);
                return outb;
        }
 
@@ -1902,7 +1927,7 @@ static u_char ns_nand_read_byte(struct nand_chip *chip)
                NS_DBG("read_byte: all bytes were read\n");
 
                if (NS_STATE(ns->nxstate) == STATE_READY)
-                       switch_state(ns);
+                       ns_switch_state(ns);
        }
 
        return outb;
@@ -1929,12 +1954,12 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
 
                if (byte == NAND_CMD_RESET) {
                        NS_LOG("reset chip\n");
-                       switch_to_ready_state(ns, NS_STATUS_OK(ns));
+                       ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
                        return;
                }
 
                /* Check that the command byte is correct */
-               if (check_command(byte)) {
+               if (ns_check_command(byte)) {
                        NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
                        return;
                }
@@ -1943,7 +1968,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
                        || NS_STATE(ns->state) == STATE_DATAOUT) {
                        int row = ns->regs.row;
 
-                       switch_state(ns);
+                       ns_switch_state(ns);
                        if (byte == NAND_CMD_RNDOUT)
                                ns->regs.row = row;
                }
@@ -1958,16 +1983,17 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
                                 * was expected but command was input. In this case ignore
                                 * previous command(s)/state(s) and accept the last one.
                                 */
-                               NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
-                                       "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+                               NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
+                                       (uint)byte,
+                                       ns_get_state_name(ns->nxstate));
                        }
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                }
 
                NS_DBG("command byte corresponding to %s state accepted\n",
-                       get_state_name(get_state_by_command(byte)));
+                       ns_get_state_name(ns_get_state_by_command(byte)));
                ns->regs.command = byte;
-               switch_state(ns);
+               ns_switch_state(ns);
 
        } else if (ns->lines.ale == 1) {
                /*
@@ -1978,11 +2004,13 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
 
                        NS_DBG("write_byte: operation isn't known yet, identify it\n");
 
-                       if (find_operation(ns, 1) < 0)
+                       if (ns_find_operation(ns, 1) < 0)
                                return;
 
-                       if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-                               switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+                       if ((ns->state & ACTION_MASK) &&
+                           ns_do_state_action(ns, ns->state) < 0) {
+                               ns_switch_to_ready_state(ns,
+                                                        NS_STATUS_FAILED(ns));
                                return;
                        }
 
@@ -2004,20 +2032,20 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
 
                /* Check that chip is expecting address */
                if (!(ns->nxstate & STATE_ADDR_MASK)) {
-                       NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
-                               "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+                       NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
+                              (uint)byte, ns_get_state_name(ns->nxstate));
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                        return;
                }
 
                /* Check if this is expected byte */
                if (ns->regs.count == ns->regs.num) {
                        NS_ERR("write_byte: no more address bytes expected\n");
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                        return;
                }
 
-               accept_addr_byte(ns, byte);
+               ns_accept_addr_byte(ns, byte);
 
                ns->regs.count += 1;
 
@@ -2026,7 +2054,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
 
                if (ns->regs.count == ns->regs.num) {
                        NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
-                       switch_state(ns);
+                       ns_switch_state(ns);
                }
 
        } else {
@@ -2036,10 +2064,10 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
 
                /* Check that chip is expecting data input */
                if (!(ns->state & STATE_DATAIN_MASK)) {
-                       NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
-                               "switch to %s\n", (uint)byte,
-                               get_state_name(ns->state), get_state_name(STATE_READY));
-                       switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+                       NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
+                              (uint)byte, ns_get_state_name(ns->state),
+                              ns_get_state_name(STATE_READY));
+                       ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                        return;
                }
 
@@ -2069,16 +2097,16 @@ static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
 
        /* Check that chip is expecting data input */
        if (!(ns->state & STATE_DATAIN_MASK)) {
-               NS_ERR("write_buf: data input isn't expected, state is %s, "
-                       "switch to STATE_READY\n", get_state_name(ns->state));
-               switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
+                      ns_get_state_name(ns->state));
+               ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                return;
        }
 
        /* Check if these are expected bytes */
        if (ns->regs.count + len > ns->regs.num) {
                NS_ERR("write_buf: too many input bytes\n");
-               switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                return;
        }
 
@@ -2105,7 +2133,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
        }
        if (!(ns->state & STATE_DATAOUT_MASK)) {
                NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
-                       get_state_name(ns->state));
+                       ns_get_state_name(ns->state));
                return;
        }
 
@@ -2121,7 +2149,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
        /* Check if these are expected bytes */
        if (ns->regs.count + len > ns->regs.num) {
                NS_ERR("read_buf: too many bytes to read\n");
-               switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+               ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                return;
        }
 
@@ -2130,7 +2158,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
 
        if (ns->regs.count == ns->regs.num) {
                if (NS_STATE(ns->nxstate) == STATE_READY)
-                       switch_state(ns);
+                       ns_switch_state(ns);
        }
 
        return;
@@ -2144,6 +2172,9 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
        const struct nand_op_instr *instr = NULL;
        struct nandsim *ns = nand_get_controller_data(chip);
 
+       if (check_only)
+               return 0;
+
        ns->lines.ce = 1;
 
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -2224,9 +2255,10 @@ static const struct nand_controller_ops ns_controller_ops = {
  */
 static int __init ns_init_module(void)
 {
+       struct list_head *pos, *n;
        struct nand_chip *chip;
        struct nandsim *ns;
-       int retval = -ENOMEM, i;
+       int ret;
 
        if (bus_width != 8 && bus_width != 16) {
                NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
@@ -2259,8 +2291,8 @@ static int __init ns_init_module(void)
                break;
        default:
                NS_ERR("bbt has to be 0..2\n");
-               retval = -EINVAL;
-               goto error;
+               ret = -EINVAL;
+               goto free_ns_struct;
        }
        /*
         * Perform minimum nandsim structure initialization to handle
@@ -2285,23 +2317,26 @@ static int __init ns_init_module(void)
 
        nsmtd->owner = THIS_MODULE;
 
-       if ((retval = parse_weakblocks()) != 0)
-               goto error;
+       ret = ns_parse_weakblocks();
+       if (ret)
+               goto free_ns_struct;
 
-       if ((retval = parse_weakpages()) != 0)
-               goto error;
+       ret = ns_parse_weakpages();
+       if (ret)
+               goto free_wb_list;
 
-       if ((retval = parse_gravepages()) != 0)
-               goto error;
+       ret = ns_parse_gravepages();
+       if (ret)
+               goto free_wp_list;
 
        nand_controller_init(&ns->base);
        ns->base.ops = &ns_controller_ops;
        chip->controller = &ns->base;
 
-       retval = nand_scan(chip, 1);
-       if (retval) {
+       ret = nand_scan(chip, 1);
+       if (ret) {
                NS_ERR("Could not scan NAND Simulator device\n");
-               goto error;
+               goto free_gp_list;
        }
 
        if (overridesize) {
@@ -2313,8 +2348,8 @@ static int __init ns_init_module(void)
 
                if (new_size >> overridesize != nsmtd->erasesize) {
                        NS_ERR("overridesize is too big\n");
-                       retval = -EINVAL;
-                       goto err_exit;
+                       ret = -EINVAL;
+                       goto cleanup_nand;
                }
 
                /* N.B. This relies on nand_scan not doing anything with the size before we change it */
@@ -2325,39 +2360,60 @@ static int __init ns_init_module(void)
                chip->pagemask = (targetsize >> chip->page_shift) - 1;
        }
 
-       if ((retval = setup_wear_reporting(nsmtd)) != 0)
-               goto err_exit;
+       ret = ns_setup_wear_reporting(nsmtd);
+       if (ret)
+               goto cleanup_nand;
 
-       if ((retval = init_nandsim(nsmtd)) != 0)
-               goto err_exit;
+       ret = ns_init(nsmtd);
+       if (ret)
+               goto free_ebw;
 
-       if ((retval = nand_create_bbt(chip)) != 0)
-               goto err_exit;
+       ret = nand_create_bbt(chip);
+       if (ret)
+               goto free_ns_object;
 
-       if ((retval = parse_badblocks(ns, nsmtd)) != 0)
-               goto err_exit;
+       ret = ns_parse_badblocks(ns, nsmtd);
+       if (ret)
+               goto free_ns_object;
 
        /* Register NAND partitions */
-       retval = mtd_device_register(nsmtd, &ns->partitions[0],
-                                    ns->nbparts);
-       if (retval != 0)
-               goto err_exit;
+       ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
+       if (ret)
+               goto free_ns_object;
 
-       if ((retval = nandsim_debugfs_create(ns)) != 0)
-               goto err_exit;
+       ret = ns_debugfs_create(ns);
+       if (ret)
+               goto unregister_mtd;
 
         return 0;
 
-err_exit:
-       free_nandsim(ns);
-       nand_release(chip);
-       for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
-               kfree(ns->partitions[i].name);
-error:
+unregister_mtd:
+       WARN_ON(mtd_device_unregister(nsmtd));
+free_ns_object:
+       ns_free(ns);
+free_ebw:
+       kfree(erase_block_wear);
+cleanup_nand:
+       nand_cleanup(chip);
+free_gp_list:
+       list_for_each_safe(pos, n, &grave_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct grave_page, list));
+       }
+free_wp_list:
+       list_for_each_safe(pos, n, &weak_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_page, list));
+       }
+free_wb_list:
+       list_for_each_safe(pos, n, &weak_blocks) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_block, list));
+       }
+free_ns_struct:
        kfree(ns);
-       free_lists();
 
-       return retval;
+       return ret;
 }
 
 module_init(ns_init_module);
@@ -2369,14 +2425,30 @@ static void __exit ns_cleanup_module(void)
 {
        struct nand_chip *chip = mtd_to_nand(nsmtd);
        struct nandsim *ns = nand_get_controller_data(chip);
-       int i;
+       struct list_head *pos, *n;
 
-       free_nandsim(ns);    /* Free nandsim private resources */
-       nand_release(chip); /* Unregister driver */
-       for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
-               kfree(ns->partitions[i].name);
-       kfree(ns);        /* Free other structures */
-       free_lists();
+       ns_debugfs_remove(ns);
+       WARN_ON(mtd_device_unregister(nsmtd));
+       ns_free(ns);
+       kfree(erase_block_wear);
+       nand_cleanup(chip);
+
+       list_for_each_safe(pos, n, &grave_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct grave_page, list));
+       }
+
+       list_for_each_safe(pos, n, &weak_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_page, list));
+       }
+
+       list_for_each_safe(pos, n, &weak_blocks) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_block, list));
+       }
+
+       kfree(ns);
 }
 
 module_exit(ns_cleanup_module);
index d324396..ed38338 100644 (file)
@@ -244,9 +244,13 @@ static int ndfc_probe(struct platform_device *ofdev)
 static int ndfc_remove(struct platform_device *ofdev)
 {
        struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
-       struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
+       struct nand_chip *chip = &ndfc->chip;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       int ret;
 
-       nand_release(&ndfc->chip);
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(chip);
        kfree(mtd->name);
 
        return 0;
index ad77c11..eb7fcfd 100644 (file)
@@ -2283,14 +2283,18 @@ static int omap_nand_remove(struct platform_device *pdev)
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct omap_nand_info *info = mtd_to_omap(mtd);
+       int ret;
+
        if (nand_chip->ecc.priv) {
                nand_bch_free(nand_chip->ecc.priv);
                nand_chip->ecc.priv = NULL;
        }
        if (info->dma)
                dma_release_channel(info->dma);
-       nand_release(nand_chip);
-       return 0;
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(nand_chip);
+       return ret;
 }
 
 static const struct of_device_id omap_nand_ids[] = {
index 3fa0e2c..078b102 100644 (file)
@@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        if (pm_runtime_get_sync(&pdev->dev) < 0) {
                ret = -EINVAL;
+               pm_runtime_put_sync(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                dev_err(&pdev->dev, "can't enable clock\n");
                return ret;
index d27b39a..880b54c 100644 (file)
@@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
        mtd->name = "orion_nand";
        ret = mtd_device_register(mtd, board->parts, board->nr_parts);
        if (ret) {
-               nand_release(nc);
+               nand_cleanup(nc);
                goto no_dev;
        }
 
@@ -195,8 +195,12 @@ static int orion_nand_remove(struct platform_device *pdev)
 {
        struct orion_nand_info *info = platform_get_drvdata(pdev);
        struct nand_chip *chip = &info->chip;
+       int ret;
 
-       nand_release(chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+
+       nand_cleanup(chip);
 
        clk_disable_unprepare(info->clk);
 
index c43cb4d..8d0d76a 100644 (file)
@@ -32,6 +32,7 @@ struct oxnas_nand_ctrl {
        void __iomem *io_base;
        struct clk *clk;
        struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+       unsigned int nchips;
 };
 
 static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
@@ -79,9 +80,9 @@ static int oxnas_nand_probe(struct platform_device *pdev)
        struct nand_chip *chip;
        struct mtd_info *mtd;
        struct resource *res;
-       int nchips = 0;
        int count = 0;
        int err = 0;
+       int i;
 
        /* Allocate memory for the device structure (and zero it) */
        oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
@@ -140,17 +141,15 @@ static int oxnas_nand_probe(struct platform_device *pdev)
                        goto err_release_child;
 
                err = mtd_device_register(mtd, NULL, 0);
-               if (err) {
-                       nand_release(chip);
-                       goto err_release_child;
-               }
+               if (err)
+                       goto err_cleanup_nand;
 
-               oxnas->chips[nchips] = chip;
-               ++nchips;
+               oxnas->chips[oxnas->nchips] = chip;
+               ++oxnas->nchips;
        }
 
        /* Exit if no chips found */
-       if (!nchips) {
+       if (!oxnas->nchips) {
                err = -ENODEV;
                goto err_clk_unprepare;
        }
@@ -159,8 +158,17 @@ static int oxnas_nand_probe(struct platform_device *pdev)
 
        return 0;
 
+err_cleanup_nand:
+       nand_cleanup(chip);
 err_release_child:
        of_node_put(nand_np);
+
+       for (i = 0; i < oxnas->nchips; i++) {
+               chip = oxnas->chips[i];
+               WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+               nand_cleanup(chip);
+       }
+
 err_clk_unprepare:
        clk_disable_unprepare(oxnas->clk);
        return err;
@@ -169,9 +177,14 @@ err_clk_unprepare:
 static int oxnas_nand_remove(struct platform_device *pdev)
 {
        struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+       struct nand_chip *chip;
+       int i;
 
-       if (oxnas->chips[0])
-               nand_release(oxnas->chips[0]);
+       for (i = 0; i < oxnas->nchips; i++) {
+               chip = oxnas->chips[i];
+               WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+               nand_cleanup(chip);
+       }
 
        clk_disable_unprepare(oxnas->clk);
 
index 9cfe739..d8eca8c 100644 (file)
@@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
        if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
                dev_err(dev, "Unable to register MTD device\n");
                err = -ENODEV;
-               goto out_lpc;
+               goto out_cleanup_nand;
        }
 
        dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
@@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
 
        return 0;
 
+ out_cleanup_nand:
+       nand_cleanup(chip);
  out_lpc:
        release_region(lpcctl, 4);
  out_ior:
@@ -167,6 +169,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
 static int pasemi_nand_remove(struct platform_device *ofdev)
 {
        struct nand_chip *chip;
+       int ret;
 
        if (!pasemi_nand_mtd)
                return 0;
@@ -174,7 +177,9 @@ static int pasemi_nand_remove(struct platform_device *ofdev)
        chip = mtd_to_nand(pasemi_nand_mtd);
 
        /* Release resources, unregister device */
-       nand_release(chip);
+       ret = mtd_device_unregister(pasemi_nand_mtd);
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        release_region(lpcctl, 4);
 
index dc0f307..556182f 100644 (file)
@@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev)
        if (!err)
                return err;
 
-       nand_release(&data->chip);
+       nand_cleanup(&data->chip);
 out:
        if (pdata->ctrl.remove)
                pdata->ctrl.remove(pdev);
@@ -106,8 +106,12 @@ static int plat_nand_remove(struct platform_device *pdev)
 {
        struct plat_nand_data *data = platform_get_drvdata(pdev);
        struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+       struct nand_chip *chip = &data->chip;
+       int ret;
 
-       nand_release(&data->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        if (pdata->ctrl.remove)
                pdata->ctrl.remove(pdev);
 
index 5b11c70..f1daf33 100644 (file)
@@ -2836,7 +2836,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
        chip->legacy.block_markbad      = qcom_nandc_block_markbad;
 
        chip->controller = &nandc->controller;
-       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
+       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
                         NAND_SKIP_BBTSCAN;
 
        /* set up initial status value */
@@ -3005,10 +3005,15 @@ static int qcom_nandc_remove(struct platform_device *pdev)
        struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct qcom_nand_host *host;
+       struct nand_chip *chip;
+       int ret;
 
-       list_for_each_entry(host, &nandc->host_list, node)
-               nand_release(&host->chip);
-
+       list_for_each_entry(host, &nandc->host_list, node) {
+               chip = &host->chip;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+       }
 
        qcom_nandc_unalloc(nandc);
 
index 7777425..f865e3a 100644 (file)
@@ -651,7 +651,8 @@ static int r852_register_nand_device(struct r852_device *dev)
        dev->card_registered = 1;
        return 0;
 error3:
-       nand_release(dev->chip);
+       WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
+       nand_cleanup(dev->chip);
 error1:
        /* Force card redetect */
        dev->card_detected = 0;
@@ -670,7 +671,8 @@ static void r852_unregister_nand_device(struct r852_device *dev)
                return;
 
        device_remove_file(&mtd->dev, &dev_attr_media_type);
-       nand_release(dev->chip);
+       WARN_ON(mtd_device_unregister(mtd));
+       nand_cleanup(dev->chip);
        r852_engine_disable(dev);
        dev->card_registered = 0;
 }
index 0009c18..f86dff3 100644 (file)
@@ -779,7 +779,8 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
 
                for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
                        pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
-                       nand_release(&ptr->chip);
+                       WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
+                       nand_cleanup(&ptr->chip);
                }
        }
 
index 058e99d..a661b8b 100644 (file)
@@ -1204,9 +1204,13 @@ err_chip:
 static int flctl_remove(struct platform_device *pdev)
 {
        struct sh_flctl *flctl = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &flctl->chip;
+       int ret;
 
        flctl_release_dma(flctl);
-       nand_release(&flctl->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
index b47a9ea..51286f7 100644 (file)
@@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
        return 0;
 
 err_add:
-       nand_release(this);
+       nand_cleanup(this);
 
 err_scan:
        iounmap(sharpsl->io);
@@ -199,13 +199,19 @@ err_get_res:
 static int sharpsl_nand_remove(struct platform_device *pdev)
 {
        struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &sharpsl->chip;
+       int ret;
 
-       /* Release resources, unregister device */
-       nand_release(&sharpsl->chip);
+       /* Unregister device */
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+
+       /* Release resources */
+       nand_cleanup(chip);
 
        iounmap(sharpsl->io);
 
-       /* Free the MTD device structure */
+       /* Free the driver's structure */
        kfree(sharpsl);
 
        return 0;
index 20f40c0..243b34c 100644 (file)
@@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
        if (!res)
                return res;
 
-       nand_release(nand_chip);
+       nand_cleanup(nand_chip);
 
 out:
        iounmap(host->io_base);
@@ -182,8 +182,12 @@ out:
 static int socrates_nand_remove(struct platform_device *ofdev)
 {
        struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+       struct nand_chip *chip = &host->nand_chip;
+       int ret;
 
-       nand_release(&host->nand_chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        iounmap(host->io_base);
 
index b6d45cd..65c9d17 100644 (file)
@@ -4,6 +4,7 @@
  * Author: Christophe Kerello <christophe.kerello@st.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -37,8 +38,7 @@
 /* Max ECC buffer length */
 #define FMC2_MAX_ECC_BUF_LEN           (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
 
-#define FMC2_TIMEOUT_US                        1000
-#define FMC2_TIMEOUT_MS                        1000
+#define FMC2_TIMEOUT_MS                        5000
 
 /* Timings */
 #define FMC2_THIZ                      1
 /* Register: FMC2_PCR */
 #define FMC2_PCR_PWAITEN               BIT(1)
 #define FMC2_PCR_PBKEN                 BIT(2)
-#define FMC2_PCR_PWID_MASK             GENMASK(5, 4)
-#define FMC2_PCR_PWID(x)               (((x) & 0x3) << 4)
+#define FMC2_PCR_PWID                  GENMASK(5, 4)
 #define FMC2_PCR_PWID_BUSWIDTH_8       0
 #define FMC2_PCR_PWID_BUSWIDTH_16      1
 #define FMC2_PCR_ECCEN                 BIT(6)
 #define FMC2_PCR_ECCALG                        BIT(8)
-#define FMC2_PCR_TCLR_MASK             GENMASK(12, 9)
-#define FMC2_PCR_TCLR(x)               (((x) & 0xf) << 9)
+#define FMC2_PCR_TCLR                  GENMASK(12, 9)
 #define FMC2_PCR_TCLR_DEFAULT          0xf
-#define FMC2_PCR_TAR_MASK              GENMASK(16, 13)
-#define FMC2_PCR_TAR(x)                        (((x) & 0xf) << 13)
+#define FMC2_PCR_TAR                   GENMASK(16, 13)
 #define FMC2_PCR_TAR_DEFAULT           0xf
-#define FMC2_PCR_ECCSS_MASK            GENMASK(19, 17)
-#define FMC2_PCR_ECCSS(x)              (((x) & 0x7) << 17)
+#define FMC2_PCR_ECCSS                 GENMASK(19, 17)
 #define FMC2_PCR_ECCSS_512             1
 #define FMC2_PCR_ECCSS_2048            3
 #define FMC2_PCR_BCHECC                        BIT(24)
 #define FMC2_SR_NWRF                   BIT(6)
 
 /* Register: FMC2_PMEM */
-#define FMC2_PMEM_MEMSET(x)            (((x) & 0xff) << 0)
-#define FMC2_PMEM_MEMWAIT(x)           (((x) & 0xff) << 8)
-#define FMC2_PMEM_MEMHOLD(x)           (((x) & 0xff) << 16)
-#define FMC2_PMEM_MEMHIZ(x)            (((x) & 0xff) << 24)
+#define FMC2_PMEM_MEMSET               GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT              GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD              GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ               GENMASK(31, 24)
 #define FMC2_PMEM_DEFAULT              0x0a0a0a0a
 
 /* Register: FMC2_PATT */
-#define FMC2_PATT_ATTSET(x)            (((x) & 0xff) << 0)
-#define FMC2_PATT_ATTWAIT(x)           (((x) & 0xff) << 8)
-#define FMC2_PATT_ATTHOLD(x)           (((x) & 0xff) << 16)
-#define FMC2_PATT_ATTHIZ(x)            (((x) & 0xff) << 24)
+#define FMC2_PATT_ATTSET               GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT              GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD              GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ               GENMASK(31, 24)
 #define FMC2_PATT_DEFAULT              0x0a0a0a0a
 
 /* Register: FMC2_ISR */
 /* Register: FMC2_CSQCFGR1 */
 #define FMC2_CSQCFGR1_CMD2EN           BIT(1)
 #define FMC2_CSQCFGR1_DMADEN           BIT(2)
-#define FMC2_CSQCFGR1_ACYNBR(x)                (((x) & 0x7) << 4)
-#define FMC2_CSQCFGR1_CMD1(x)          (((x) & 0xff) << 8)
-#define FMC2_CSQCFGR1_CMD2(x)          (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR1_ACYNBR           GENMASK(6, 4)
+#define FMC2_CSQCFGR1_CMD1             GENMASK(15, 8)
+#define FMC2_CSQCFGR1_CMD2             GENMASK(23, 16)
 #define FMC2_CSQCFGR1_CMD1T            BIT(24)
 #define FMC2_CSQCFGR1_CMD2T            BIT(25)
 
 #define FMC2_CSQCFGR2_SQSDTEN          BIT(0)
 #define FMC2_CSQCFGR2_RCMD2EN          BIT(1)
 #define FMC2_CSQCFGR2_DMASEN           BIT(2)
-#define FMC2_CSQCFGR2_RCMD1(x)         (((x) & 0xff) << 8)
-#define FMC2_CSQCFGR2_RCMD2(x)         (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR2_RCMD1            GENMASK(15, 8)
+#define FMC2_CSQCFGR2_RCMD2            GENMASK(23, 16)
 #define FMC2_CSQCFGR2_RCMD1T           BIT(24)
 #define FMC2_CSQCFGR2_RCMD2T           BIT(25)
 
 /* Register: FMC2_CSQCFGR3 */
-#define FMC2_CSQCFGR3_SNBR(x)          (((x) & 0x1f) << 8)
+#define FMC2_CSQCFGR3_SNBR             GENMASK(13, 8)
 #define FMC2_CSQCFGR3_AC1T             BIT(16)
 #define FMC2_CSQCFGR3_AC2T             BIT(17)
 #define FMC2_CSQCFGR3_AC3T             BIT(18)
 #define FMC2_CSQCFGR3_RAC2T            BIT(23)
 
 /* Register: FMC2_CSQCAR1 */
-#define FMC2_CSQCAR1_ADDC1(x)          (((x) & 0xff) << 0)
-#define FMC2_CSQCAR1_ADDC2(x)          (((x) & 0xff) << 8)
-#define FMC2_CSQCAR1_ADDC3(x)          (((x) & 0xff) << 16)
-#define FMC2_CSQCAR1_ADDC4(x)          (((x) & 0xff) << 24)
+#define FMC2_CSQCAR1_ADDC1             GENMASK(7, 0)
+#define FMC2_CSQCAR1_ADDC2             GENMASK(15, 8)
+#define FMC2_CSQCAR1_ADDC3             GENMASK(23, 16)
+#define FMC2_CSQCAR1_ADDC4             GENMASK(31, 24)
 
 /* Register: FMC2_CSQCAR2 */
-#define FMC2_CSQCAR2_ADDC5(x)          (((x) & 0xff) << 0)
-#define FMC2_CSQCAR2_NANDCEN(x)                (((x) & 0x3) << 10)
-#define FMC2_CSQCAR2_SAO(x)            (((x) & 0xffff) << 16)
+#define FMC2_CSQCAR2_ADDC5             GENMASK(7, 0)
+#define FMC2_CSQCAR2_NANDCEN           GENMASK(11, 10)
+#define FMC2_CSQCAR2_SAO               GENMASK(31, 16)
 
 /* Register: FMC2_CSQIER */
 #define FMC2_CSQIER_TCIE               BIT(0)
 /* Register: FMC2_BCHDSR0 */
 #define FMC2_BCHDSR0_DUE               BIT(0)
 #define FMC2_BCHDSR0_DEF               BIT(1)
-#define FMC2_BCHDSR0_DEN_MASK          GENMASK(7, 4)
-#define FMC2_BCHDSR0_DEN_SHIFT         4
+#define FMC2_BCHDSR0_DEN               GENMASK(7, 4)
 
 /* Register: FMC2_BCHDSR1 */
-#define FMC2_BCHDSR1_EBP1_MASK         GENMASK(12, 0)
-#define FMC2_BCHDSR1_EBP2_MASK         GENMASK(28, 16)
-#define FMC2_BCHDSR1_EBP2_SHIFT                16
+#define FMC2_BCHDSR1_EBP1              GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2              GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR2 */
-#define FMC2_BCHDSR2_EBP3_MASK         GENMASK(12, 0)
-#define FMC2_BCHDSR2_EBP4_MASK         GENMASK(28, 16)
-#define FMC2_BCHDSR2_EBP4_SHIFT                16
+#define FMC2_BCHDSR2_EBP3              GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4              GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR3 */
-#define FMC2_BCHDSR3_EBP5_MASK         GENMASK(12, 0)
-#define FMC2_BCHDSR3_EBP6_MASK         GENMASK(28, 16)
-#define FMC2_BCHDSR3_EBP6_SHIFT                16
+#define FMC2_BCHDSR3_EBP5              GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6              GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR4 */
-#define FMC2_BCHDSR4_EBP7_MASK         GENMASK(12, 0)
-#define FMC2_BCHDSR4_EBP8_MASK         GENMASK(28, 16)
-#define FMC2_BCHDSR4_EBP8_SHIFT                16
+#define FMC2_BCHDSR4_EBP7              GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8              GENMASK(28, 16)
 
 enum stm32_fmc2_ecc {
        FMC2_ECC_HAM = 1,
@@ -281,43 +272,41 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
        return container_of(base, struct stm32_fmc2_nfc, base);
 }
 
-/* Timings configuration */
-static void stm32_fmc2_timings_init(struct nand_chip *chip)
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
        struct stm32_fmc2_timings *timings = &nand->timings;
-       u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
        u32 pmem, patt;
 
        /* Set tclr/tar timings */
-       pcr &= ~FMC2_PCR_TCLR_MASK;
-       pcr |= FMC2_PCR_TCLR(timings->tclr);
-       pcr &= ~FMC2_PCR_TAR_MASK;
-       pcr |= FMC2_PCR_TAR(timings->tar);
+       pcr &= ~FMC2_PCR_TCLR;
+       pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr);
+       pcr &= ~FMC2_PCR_TAR;
+       pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar);
 
        /* Set tset/twait/thold/thiz timings in common bank */
-       pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
-       pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
-       pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
-       pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
+       pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+       pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+       pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+       pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
 
        /* Set tset/twait/thold/thiz timings in attribut bank */
-       patt = FMC2_PATT_ATTSET(timings->tset_att);
-       patt |= FMC2_PATT_ATTWAIT(timings->twait);
-       patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
-       patt |= FMC2_PATT_ATTHIZ(timings->thiz);
-
-       writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
-       writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
-       writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
+       patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+       patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+       patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+       patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+
+       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+       writel_relaxed(pmem, nfc->io_base + FMC2_PMEM);
+       writel_relaxed(patt, nfc->io_base + FMC2_PATT);
 }
 
-/* Controller configuration */
-static void stm32_fmc2_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-       u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
 
        /* Configure ECC algorithm (default configuration is Hamming) */
        pcr &= ~FMC2_PCR_ECCALG;
@@ -330,195 +319,182 @@ static void stm32_fmc2_setup(struct nand_chip *chip)
        }
 
        /* Set buswidth */
-       pcr &= ~FMC2_PCR_PWID_MASK;
+       pcr &= ~FMC2_PCR_PWID;
        if (chip->options & NAND_BUSWIDTH_16)
-               pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+               pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
 
        /* Set ECC sector size */
-       pcr &= ~FMC2_PCR_ECCSS_MASK;
-       pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
+       pcr &= ~FMC2_PCR_ECCSS;
+       pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
 
-       writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
 }
 
-/* Select target */
-static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
        struct dma_slave_config dma_cfg;
        int ret;
 
-       if (nand->cs_used[chipnr] == fmc2->cs_sel)
+       if (nand->cs_used[chipnr] == nfc->cs_sel)
                return 0;
 
-       fmc2->cs_sel = nand->cs_used[chipnr];
+       nfc->cs_sel = nand->cs_used[chipnr];
+       stm32_fmc2_nfc_setup(chip);
+       stm32_fmc2_nfc_timings_init(chip);
 
-       /* FMC2 setup routine */
-       stm32_fmc2_setup(chip);
-
-       /* Apply timings */
-       stm32_fmc2_timings_init(chip);
-
-       if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
+       if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
                memset(&dma_cfg, 0, sizeof(dma_cfg));
-               dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
-               dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+               dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+               dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
                dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                dma_cfg.src_maxburst = 32;
                dma_cfg.dst_maxburst = 32;
 
-               ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
+               ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
                if (ret) {
-                       dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
+                       dev_err(nfc->dev, "tx DMA engine slave config failed\n");
                        return ret;
                }
 
-               ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
+               ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
                if (ret) {
-                       dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
+                       dev_err(nfc->dev, "rx DMA engine slave config failed\n");
                        return ret;
                }
        }
 
-       if (fmc2->dma_ecc_ch) {
+       if (nfc->dma_ecc_ch) {
                /*
                 * Hamming: we read HECCR register
                 * BCH4/BCH8: we read BCHDSRSx registers
                 */
                memset(&dma_cfg, 0, sizeof(dma_cfg));
-               dma_cfg.src_addr = fmc2->io_phys_addr;
+               dma_cfg.src_addr = nfc->io_phys_addr;
                dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
                                    FMC2_HECCR : FMC2_BCHDSR0;
                dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-               ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
+               ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
                if (ret) {
-                       dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
+                       dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
                        return ret;
                }
 
                /* Calculate ECC length needed for one sector */
-               fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
-                                   FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+               nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+                                  FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
        }
 
        return 0;
 }
 
-/* Set bus width to 16-bit or 8-bit */
-static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
 {
-       u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
 
-       pcr &= ~FMC2_PCR_PWID_MASK;
+       pcr &= ~FMC2_PCR_PWID;
        if (set)
-               pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
-       writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+               pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
+       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
 }
 
-/* Enable/disable ECC */
-static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
 {
-       u32 pcr = readl(fmc2->io_base + FMC2_PCR);
+       u32 pcr = readl(nfc->io_base + FMC2_PCR);
 
        pcr &= ~FMC2_PCR_ECCEN;
        if (enable)
                pcr |= FMC2_PCR_ECCEN;
-       writel(pcr, fmc2->io_base + FMC2_PCR);
+       writel(pcr, nfc->io_base + FMC2_PCR);
 }
 
-/* Enable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+       u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
 
        csqier |= FMC2_CSQIER_TCIE;
 
-       fmc2->irq_state = FMC2_IRQ_SEQ;
+       nfc->irq_state = FMC2_IRQ_SEQ;
 
-       writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+       writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
 }
 
-/* Disable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+       u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
 
        csqier &= ~FMC2_CSQIER_TCIE;
 
-       writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+       writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
 
-       fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+       nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-/* Clear irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+       writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR);
 }
 
-/* Enable irq sources in case of bch is used */
-static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
-                                            int mode)
+static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc,
+                                                int mode)
 {
-       u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+       u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
 
        if (mode == NAND_ECC_WRITE)
                bchier |= FMC2_BCHIER_EPBRIE;
        else
                bchier |= FMC2_BCHIER_DERIE;
 
-       fmc2->irq_state = FMC2_IRQ_BCH;
+       nfc->irq_state = FMC2_IRQ_BCH;
 
-       writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+       writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
 }
 
-/* Disable irq sources in case of bch is used */
-static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+       u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
 
        bchier &= ~FMC2_BCHIER_DERIE;
        bchier &= ~FMC2_BCHIER_EPBRIE;
 
-       writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+       writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
 
-       fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+       nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-/* Clear irq sources in case of bch is used */
-static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-       writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+       writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR);
 }
 
 /*
  * Enable ECC logic and reset syndrome/parity bits previously calculated
  * Syndrome/parity bits is cleared by setting the ECCEN bit to 0
  */
-static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 
-       stm32_fmc2_set_ecc(fmc2, false);
+       stm32_fmc2_nfc_set_ecc(nfc, false);
 
        if (chip->ecc.strength != FMC2_ECC_HAM) {
-               u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+               u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
 
                if (mode == NAND_ECC_WRITE)
                        pcr |= FMC2_PCR_WEN;
                else
                        pcr &= ~FMC2_PCR_WEN;
-               writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+               writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
 
-               reinit_completion(&fmc2->complete);
-               stm32_fmc2_clear_bch_irq(fmc2);
-               stm32_fmc2_enable_bch_irq(fmc2, mode);
+               reinit_completion(&nfc->complete);
+               stm32_fmc2_nfc_clear_bch_irq(nfc);
+               stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
        }
 
-       stm32_fmc2_set_ecc(fmc2, true);
+       stm32_fmc2_nfc_set_ecc(nfc, true);
 }
 
 /*
@@ -526,40 +502,37 @@ static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
  * ECC is 3 bytes for 512 bytes of data (supports error correction up to
  * max of 1-bit)
  */
-static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
 {
        ecc[0] = ecc_sta;
        ecc[1] = ecc_sta >> 8;
        ecc[2] = ecc_sta >> 16;
 }
 
-static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
-                                   u8 *ecc)
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+                                       u8 *ecc)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        u32 sr, heccr;
        int ret;
 
-       ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
-                                        sr, sr & FMC2_SR_NWRF, 10,
-                                        FMC2_TIMEOUT_MS);
+       ret = readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR,
+                                        sr, sr & FMC2_SR_NWRF, 1,
+                                        1000 * FMC2_TIMEOUT_MS);
        if (ret) {
-               dev_err(fmc2->dev, "ham timeout\n");
+               dev_err(nfc->dev, "ham timeout\n");
                return ret;
        }
 
-       heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
-
-       stm32_fmc2_ham_set_ecc(heccr, ecc);
-
-       /* Disable ECC */
-       stm32_fmc2_set_ecc(fmc2, false);
+       heccr = readl_relaxed(nfc->io_base + FMC2_HECCR);
+       stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+       stm32_fmc2_nfc_set_ecc(nfc, false);
 
        return 0;
 }
 
-static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
-                                 u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+                                     u8 *read_ecc, u8 *calc_ecc)
 {
        u8 bit_position = 0, b0, b1, b2;
        u32 byte_addr = 0, b;
@@ -615,28 +588,28 @@ static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
  * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to
  * max of 4-bit/8-bit)
  */
-static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
-                                   u8 *ecc)
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+                                       u8 *ecc)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        u32 bchpbr;
 
        /* Wait until the BCH code is ready */
-       if (!wait_for_completion_timeout(&fmc2->complete,
+       if (!wait_for_completion_timeout(&nfc->complete,
                                         msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-               dev_err(fmc2->dev, "bch timeout\n");
-               stm32_fmc2_disable_bch_irq(fmc2);
+               dev_err(nfc->dev, "bch timeout\n");
+               stm32_fmc2_nfc_disable_bch_irq(nfc);
                return -ETIMEDOUT;
        }
 
        /* Read parity bits */
-       bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+       bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1);
        ecc[0] = bchpbr;
        ecc[1] = bchpbr >> 8;
        ecc[2] = bchpbr >> 16;
        ecc[3] = bchpbr >> 24;
 
-       bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+       bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2);
        ecc[4] = bchpbr;
        ecc[5] = bchpbr >> 8;
        ecc[6] = bchpbr >> 16;
@@ -644,24 +617,22 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
        if (chip->ecc.strength == FMC2_ECC_BCH8) {
                ecc[7] = bchpbr >> 24;
 
-               bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+               bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3);
                ecc[8] = bchpbr;
                ecc[9] = bchpbr >> 8;
                ecc[10] = bchpbr >> 16;
                ecc[11] = bchpbr >> 24;
 
-               bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+               bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4);
                ecc[12] = bchpbr;
        }
 
-       /* Disable ECC */
-       stm32_fmc2_set_ecc(fmc2, false);
+       stm32_fmc2_nfc_set_ecc(nfc, false);
 
        return 0;
 }
 
-/* BCH algorithm correction */
-static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
 {
        u32 bchdsr0 = ecc_sta[0];
        u32 bchdsr1 = ecc_sta[1];
@@ -680,16 +651,16 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
        if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
                return -EBADMSG;
 
-       pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
-       pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
-       pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
-       pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
-       pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
-       pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
-       pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
-       pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+       pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+       pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+       pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+       pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+       pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+       pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+       pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+       pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
 
-       den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
+       den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
        for (i = 0; i < den; i++) {
                if (pos[i] < eccsize * 8) {
                        change_bit(pos[i], (unsigned long *)dat);
@@ -700,34 +671,33 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
        return nb_errs;
 }
 
-static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
-                                 u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+                                     u8 *read_ecc, u8 *calc_ecc)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        u32 ecc_sta[5];
 
        /* Wait until the decoding error is ready */
-       if (!wait_for_completion_timeout(&fmc2->complete,
+       if (!wait_for_completion_timeout(&nfc->complete,
                                         msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-               dev_err(fmc2->dev, "bch timeout\n");
-               stm32_fmc2_disable_bch_irq(fmc2);
+               dev_err(nfc->dev, "bch timeout\n");
+               stm32_fmc2_nfc_disable_bch_irq(nfc);
                return -ETIMEDOUT;
        }
 
-       ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
-       ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
-       ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
-       ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
-       ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+       ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0);
+       ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1);
+       ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2);
+       ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3);
+       ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4);
 
-       /* Disable ECC */
-       stm32_fmc2_set_ecc(fmc2, false);
+       stm32_fmc2_nfc_set_ecc(nfc, false);
 
-       return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+       return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
 }
 
-static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
-                               int oob_required, int page)
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+                                   int oob_required, int page)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret, i, s, stat, eccsize = chip->ecc.size;
@@ -789,21 +759,21 @@ static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
 }
 
 /* Sequencer read/write configuration */
-static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
-                                   int raw, bool write_data)
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+                                       int raw, bool write_data)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u32 csqcfgr1, csqcfgr2, csqcfgr3;
        u32 csqar1, csqar2;
        u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
-       u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
 
        if (write_data)
                pcr |= FMC2_PCR_WEN;
        else
                pcr &= ~FMC2_PCR_WEN;
-       writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
 
        /*
         * - Set Program Page/Page Read command
@@ -812,11 +782,11 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
         */
        csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
        if (write_data)
-               csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
        else
-               csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
+               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
                            FMC2_CSQCFGR1_CMD2EN |
-                           FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
+                           FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
                            FMC2_CSQCFGR1_CMD2T;
 
        /*
@@ -826,11 +796,12 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
         * - Set timings
         */
        if (write_data)
-               csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+               csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
        else
-               csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
+               csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
                           FMC2_CSQCFGR2_RCMD2EN |
-                          FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
+                          FIELD_PREP(FMC2_CSQCFGR2_RCMD2,
+                                     NAND_CMD_RNDOUTSTART) |
                           FMC2_CSQCFGR2_RCMD1T |
                           FMC2_CSQCFGR2_RCMD2T;
        if (!raw) {
@@ -842,7 +813,7 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
         * - Set the number of sectors to be written
         * - Set timings
         */
-       csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+       csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
        if (write_data) {
                csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
                if (chip->options & NAND_ROW_ADDR_3)
@@ -856,8 +827,8 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
         * Byte 1 and byte 2 => column, we start at 0x0
         * Byte 3 and byte 4 => page
         */
-       csqar1 = FMC2_CSQCAR1_ADDC3(page);
-       csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+       csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+       csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
 
        /*
         * - Set chip enable number
@@ -865,43 +836,44 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
         * - Calculate the number of address cycles to be issued
         * - Set byte 5 of address cycle if needed
         */
-       csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+       csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
        if (chip->options & NAND_BUSWIDTH_16)
-               csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
        else
-               csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
        if (chip->options & NAND_ROW_ADDR_3) {
-               csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
-               csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
        } else {
-               csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
        }
 
-       writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
-       writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
-       writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
-       writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
-       writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+       writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1);
+       writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2);
+       writel_relaxed(csqcfgr3, nfc->io_base + FMC2_CSQCFGR3);
+       writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1);
+       writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2);
 }
 
-static void stm32_fmc2_dma_callback(void *arg)
+static void stm32_fmc2_nfc_dma_callback(void *arg)
 {
        complete((struct completion *)arg);
 }
 
 /* Read/write data from/to a page */
-static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
-                          int raw, bool write_data)
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+                              int raw, bool write_data)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct dma_async_tx_descriptor *desc_data, *desc_ecc;
        struct scatterlist *sg;
-       struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+       struct dma_chan *dma_ch = nfc->dma_rx_ch;
        enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
        enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
-       u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
+       u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR);
        int eccsteps = chip->ecc.steps;
        int eccsize = chip->ecc.size;
+       unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
        const u8 *p = buf;
        int s, ret;
 
@@ -909,20 +881,20 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
        if (write_data) {
                dma_data_dir = DMA_TO_DEVICE;
                dma_transfer_dir = DMA_MEM_TO_DEV;
-               dma_ch = fmc2->dma_tx_ch;
+               dma_ch = nfc->dma_tx_ch;
        }
 
-       for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+       for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
                sg_set_buf(sg, p, eccsize);
                p += eccsize;
        }
 
-       ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+       ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
                         eccsteps, dma_data_dir);
        if (ret < 0)
                return ret;
 
-       desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+       desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
                                            eccsteps, dma_transfer_dir,
                                            DMA_PREP_INTERRUPT);
        if (!desc_data) {
@@ -930,10 +902,10 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
                goto err_unmap_data;
        }
 
-       reinit_completion(&fmc2->dma_data_complete);
-       reinit_completion(&fmc2->complete);
-       desc_data->callback = stm32_fmc2_dma_callback;
-       desc_data->callback_param = &fmc2->dma_data_complete;
+       reinit_completion(&nfc->dma_data_complete);
+       reinit_completion(&nfc->complete);
+       desc_data->callback = stm32_fmc2_nfc_dma_callback;
+       desc_data->callback_param = &nfc->dma_data_complete;
        ret = dma_submit_error(dmaengine_submit(desc_data));
        if (ret)
                goto err_unmap_data;
@@ -942,19 +914,19 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
 
        if (!write_data && !raw) {
                /* Configure DMA ECC status */
-               p = fmc2->ecc_buf;
-               for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
-                       sg_set_buf(sg, p, fmc2->dma_ecc_len);
-                       p += fmc2->dma_ecc_len;
+               p = nfc->ecc_buf;
+               for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+                       sg_set_buf(sg, p, nfc->dma_ecc_len);
+                       p += nfc->dma_ecc_len;
                }
 
-               ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+               ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
                                 eccsteps, dma_data_dir);
                if (ret < 0)
                        goto err_unmap_data;
 
-               desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
-                                                  fmc2->dma_ecc_sg.sgl,
+               desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+                                                  nfc->dma_ecc_sg.sgl,
                                                   eccsteps, dma_transfer_dir,
                                                   DMA_PREP_INTERRUPT);
                if (!desc_ecc) {
@@ -962,76 +934,73 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
                        goto err_unmap_ecc;
                }
 
-               reinit_completion(&fmc2->dma_ecc_complete);
-               desc_ecc->callback = stm32_fmc2_dma_callback;
-               desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+               reinit_completion(&nfc->dma_ecc_complete);
+               desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+               desc_ecc->callback_param = &nfc->dma_ecc_complete;
                ret = dma_submit_error(dmaengine_submit(desc_ecc));
                if (ret)
                        goto err_unmap_ecc;
 
-               dma_async_issue_pending(fmc2->dma_ecc_ch);
+               dma_async_issue_pending(nfc->dma_ecc_ch);
        }
 
-       stm32_fmc2_clear_seq_irq(fmc2);
-       stm32_fmc2_enable_seq_irq(fmc2);
+       stm32_fmc2_nfc_clear_seq_irq(nfc);
+       stm32_fmc2_nfc_enable_seq_irq(nfc);
 
        /* Start the transfer */
        csqcr |= FMC2_CSQCR_CSQSTART;
-       writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+       writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR);
 
        /* Wait end of sequencer transfer */
-       if (!wait_for_completion_timeout(&fmc2->complete,
-                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-               dev_err(fmc2->dev, "seq timeout\n");
-               stm32_fmc2_disable_seq_irq(fmc2);
+       if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+               dev_err(nfc->dev, "seq timeout\n");
+               stm32_fmc2_nfc_disable_seq_irq(nfc);
                dmaengine_terminate_all(dma_ch);
                if (!write_data && !raw)
-                       dmaengine_terminate_all(fmc2->dma_ecc_ch);
+                       dmaengine_terminate_all(nfc->dma_ecc_ch);
                ret = -ETIMEDOUT;
                goto err_unmap_ecc;
        }
 
        /* Wait DMA data transfer completion */
-       if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
-                                        msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-               dev_err(fmc2->dev, "data DMA timeout\n");
+       if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+               dev_err(nfc->dev, "data DMA timeout\n");
                dmaengine_terminate_all(dma_ch);
                ret = -ETIMEDOUT;
        }
 
        /* Wait DMA ECC transfer completion */
        if (!write_data && !raw) {
-               if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
-                                       msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-                       dev_err(fmc2->dev, "ECC DMA timeout\n");
-                       dmaengine_terminate_all(fmc2->dma_ecc_ch);
+               if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+                                                timeout)) {
+                       dev_err(nfc->dev, "ECC DMA timeout\n");
+                       dmaengine_terminate_all(nfc->dma_ecc_ch);
                        ret = -ETIMEDOUT;
                }
        }
 
 err_unmap_ecc:
        if (!write_data && !raw)
-               dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+               dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
                             eccsteps, dma_data_dir);
 
 err_unmap_data:
-       dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+       dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
 
        return ret;
 }
 
-static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
-                                     const u8 *buf, int oob_required,
-                                     int page, int raw)
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+                                   int oob_required, int page, int raw)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;
 
        /* Configure the sequencer */
-       stm32_fmc2_rw_page_init(chip, page, raw, true);
+       stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
 
        /* Write the page */
-       ret = stm32_fmc2_xfer(chip, buf, raw, true);
+       ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
        if (ret)
                return ret;
 
@@ -1047,55 +1016,50 @@ static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
        return nand_prog_page_end_op(chip);
 }
 
-static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
-                                          const u8 *buf,
-                                          int oob_required,
-                                          int page)
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
+                                        int oob_required, int page)
 {
        int ret;
 
-       /* Select the target */
-       ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+       ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
        if (ret)
                return ret;
 
-       return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
+       return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
 }
 
-static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
-                                              const u8 *buf,
-                                              int oob_required,
-                                              int page)
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
+                                            const u8 *buf, int oob_required,
+                                            int page)
 {
        int ret;
 
-       /* Select the target */
-       ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+       ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
        if (ret)
                return ret;
 
-       return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
+       return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
 }
 
 /* Get a status indicating which sectors have errors */
-static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
+static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
+       u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR);
 
        return csqemsr & FMC2_CSQEMSR_SEM;
 }
 
-static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
-                                       u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
+                                     u8 *read_ecc, u8 *calc_ecc)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        int eccbytes = chip->ecc.bytes;
        int eccsteps = chip->ecc.steps;
        int eccstrength = chip->ecc.strength;
        int i, s, eccsize = chip->ecc.size;
-       u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
-       u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
+       u32 *ecc_sta = (u32 *)nfc->ecc_buf;
+       u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
        unsigned int max_bitflips = 0;
 
        for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
@@ -1104,10 +1068,11 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
                if (eccstrength == FMC2_ECC_HAM) {
                        /* Ecc_sta = FMC2_HECCR */
                        if (sta_map & BIT(s)) {
-                               stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
-                               stat = stm32_fmc2_ham_correct(chip, dat,
-                                                             &read_ecc[i],
-                                                             &calc_ecc[i]);
+                               stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
+                                                          &calc_ecc[i]);
+                               stat = stm32_fmc2_nfc_ham_correct(chip, dat,
+                                                                 &read_ecc[i],
+                                                                 &calc_ecc[i]);
                        }
                        ecc_sta++;
                } else {
@@ -1119,8 +1084,8 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
                         * Ecc_sta[4] = FMC2_BCHDSR4
                         */
                        if (sta_map & BIT(s))
-                               stat = stm32_fmc2_bch_decode(eccsize, dat,
-                                                            ecc_sta);
+                               stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
+                                                                ecc_sta);
                        ecc_sta += 5;
                }
 
@@ -1143,30 +1108,29 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
        return max_bitflips;
 }
 
-static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
-                                         int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
+                                       int oob_required, int page)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        u8 *ecc_calc = chip->ecc.calc_buf;
        u8 *ecc_code = chip->ecc.code_buf;
        u16 sta_map;
        int ret;
 
-       /* Select the target */
-       ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+       ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
        if (ret)
                return ret;
 
        /* Configure the sequencer */
-       stm32_fmc2_rw_page_init(chip, page, 0, false);
+       stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
 
        /* Read the page */
-       ret = stm32_fmc2_xfer(chip, buf, 0, false);
+       ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
        if (ret)
                return ret;
 
-       sta_map = stm32_fmc2_get_mapping_status(fmc2);
+       sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
 
        /* Check if errors happen */
        if (likely(!sta_map)) {
@@ -1193,22 +1157,21 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
        return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
 }
 
-static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
-                                             int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
+                                           int oob_required, int page)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;
 
-       /* Select the target */
-       ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+       ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
        if (ret)
                return ret;
 
        /* Configure the sequencer */
-       stm32_fmc2_rw_page_init(chip, page, 1, false);
+       stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
 
        /* Read the page */
-       ret = stm32_fmc2_xfer(chip, buf, 1, false);
+       ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
        if (ret)
                return ret;
 
@@ -1221,31 +1184,31 @@ static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
        return 0;
 }
 
-static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
 {
-       struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
+       struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
 
-       if (fmc2->irq_state == FMC2_IRQ_SEQ)
+       if (nfc->irq_state == FMC2_IRQ_SEQ)
                /* Sequencer is used */
-               stm32_fmc2_disable_seq_irq(fmc2);
-       else if (fmc2->irq_state == FMC2_IRQ_BCH)
+               stm32_fmc2_nfc_disable_seq_irq(nfc);
+       else if (nfc->irq_state == FMC2_IRQ_BCH)
                /* BCH is used */
-               stm32_fmc2_disable_bch_irq(fmc2);
+               stm32_fmc2_nfc_disable_bch_irq(nfc);
 
-       complete(&fmc2->complete);
+       complete(&nfc->complete);
 
        return IRQ_HANDLED;
 }
 
-static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
-                                unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
+                                    unsigned int len, bool force_8bit)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-       void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+       void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
 
        if (force_8bit && chip->options & NAND_BUSWIDTH_16)
                /* Reconfigure bus width to 8-bit */
-               stm32_fmc2_set_buswidth_16(fmc2, false);
+               stm32_fmc2_nfc_set_buswidth_16(nfc, false);
 
        if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
                if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1281,18 +1244,18 @@ static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
 
        if (force_8bit && chip->options & NAND_BUSWIDTH_16)
                /* Reconfigure bus width to 16-bit */
-               stm32_fmc2_set_buswidth_16(fmc2, true);
+               stm32_fmc2_nfc_set_buswidth_16(nfc, true);
 }
 
-static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
-                                 unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
+                                     unsigned int len, bool force_8bit)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-       void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+       void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
 
        if (force_8bit && chip->options & NAND_BUSWIDTH_16)
                /* Reconfigure bus width to 8-bit */
-               stm32_fmc2_set_buswidth_16(fmc2, false);
+               stm32_fmc2_nfc_set_buswidth_16(nfc, false);
 
        if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
                if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1328,48 +1291,49 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
 
        if (force_8bit && chip->options & NAND_BUSWIDTH_16)
                /* Reconfigure bus width to 16-bit */
-               stm32_fmc2_set_buswidth_16(fmc2, true);
+               stm32_fmc2_nfc_set_buswidth_16(nfc, true);
 }
 
-static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
+                                 unsigned long timeout_ms)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        const struct nand_sdr_timings *timings;
        u32 isr, sr;
 
        /* Check if there is no pending requests to the NAND flash */
-       if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+       if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr,
                                              sr & FMC2_SR_NWRF, 1,
-                                             FMC2_TIMEOUT_US))
-               dev_warn(fmc2->dev, "Waitrdy timeout\n");
+                                             1000 * FMC2_TIMEOUT_MS))
+               dev_warn(nfc->dev, "Waitrdy timeout\n");
 
        /* Wait tWB before R/B# signal is low */
        timings = nand_get_sdr_timings(&chip->data_interface);
        ndelay(PSEC_TO_NSEC(timings->tWB_max));
 
        /* R/B# signal is low, clear high level flag */
-       writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+       writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR);
 
        /* Wait R/B# signal is high */
-       return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+       return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR,
                                                 isr, isr & FMC2_ISR_IHLF,
                                                 5, 1000 * timeout_ms);
 }
 
-static int stm32_fmc2_exec_op(struct nand_chip *chip,
-                             const struct nand_operation *op,
-                             bool check_only)
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
+                                 const struct nand_operation *op,
+                                 bool check_only)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        const struct nand_op_instr *instr = NULL;
-       unsigned int op_id, i;
+       unsigned int op_id, i, timeout;
        int ret;
 
-       ret = stm32_fmc2_select_chip(chip, op->cs);
-       if (ret)
-               return ret;
-
        if (check_only)
+               return 0;
+
+       ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
+       if (ret)
                return ret;
 
        for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -1378,30 +1342,30 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
                switch (instr->type) {
                case NAND_OP_CMD_INSTR:
                        writeb_relaxed(instr->ctx.cmd.opcode,
-                                      fmc2->cmd_base[fmc2->cs_sel]);
+                                      nfc->cmd_base[nfc->cs_sel]);
                        break;
 
                case NAND_OP_ADDR_INSTR:
                        for (i = 0; i < instr->ctx.addr.naddrs; i++)
                                writeb_relaxed(instr->ctx.addr.addrs[i],
-                                              fmc2->addr_base[fmc2->cs_sel]);
+                                              nfc->addr_base[nfc->cs_sel]);
                        break;
 
                case NAND_OP_DATA_IN_INSTR:
-                       stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
-                                            instr->ctx.data.len,
-                                            instr->ctx.data.force_8bit);
+                       stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
+                                                instr->ctx.data.len,
+                                                instr->ctx.data.force_8bit);
                        break;
 
                case NAND_OP_DATA_OUT_INSTR:
-                       stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
-                                             instr->ctx.data.len,
-                                             instr->ctx.data.force_8bit);
+                       stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
+                                                 instr->ctx.data.len,
+                                                 instr->ctx.data.force_8bit);
                        break;
 
                case NAND_OP_WAITRDY_INSTR:
-                       ret = stm32_fmc2_waitrdy(chip,
-                                                instr->ctx.waitrdy.timeout_ms);
+                       timeout = instr->ctx.waitrdy.timeout_ms;
+                       ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
                        break;
                }
        }
@@ -1409,21 +1373,20 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
        return ret;
 }
 
-/* Controller initialization */
-static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
 {
-       u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
-       u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
+       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
+       u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1);
 
        /* Set CS used to undefined */
-       fmc2->cs_sel = -1;
+       nfc->cs_sel = -1;
 
        /* Enable wait feature and nand flash memory bank */
        pcr |= FMC2_PCR_PWAITEN;
        pcr |= FMC2_PCR_PBKEN;
 
        /* Set buswidth to 8 bits mode for identification */
-       pcr &= ~FMC2_PCR_PWID_MASK;
+       pcr &= ~FMC2_PCR_PWID;
 
        /* ECC logic is disabled */
        pcr &= ~FMC2_PCR_ECCEN;
@@ -1434,32 +1397,31 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
        pcr &= ~FMC2_PCR_WEN;
 
        /* Set default ECC sector size */
-       pcr &= ~FMC2_PCR_ECCSS_MASK;
-       pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
+       pcr &= ~FMC2_PCR_ECCSS;
+       pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
 
        /* Set default tclr/tar timings */
-       pcr &= ~FMC2_PCR_TCLR_MASK;
-       pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
-       pcr &= ~FMC2_PCR_TAR_MASK;
-       pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
+       pcr &= ~FMC2_PCR_TCLR;
+       pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+       pcr &= ~FMC2_PCR_TAR;
+       pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
 
        /* Enable FMC2 controller */
        bcr1 |= FMC2_BCR1_FMC2EN;
 
-       writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
-       writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
-       writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
-       writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
+       writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1);
+       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+       writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM);
+       writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT);
 }
 
-/* Controller timings */
-static void stm32_fmc2_calc_timings(struct nand_chip *chip,
-                                   const struct nand_sdr_timings *sdrt)
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+                                       const struct nand_sdr_timings *sdrt)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
        struct stm32_fmc2_timings *tims = &nand->timings;
-       unsigned long hclk = clk_get_rate(fmc2->clk);
+       unsigned long hclk = clk_get_rate(nfc->clk);
        unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
        unsigned long timing, tar, tclr, thiz, twait;
        unsigned long tset_mem, tset_att, thold_mem, thold_att;
@@ -1583,8 +1545,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
        tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
 }
 
-static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
-                                     const struct nand_data_interface *conf)
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+                                         const struct nand_data_interface *conf)
 {
        const struct nand_sdr_timings *sdrt;
 
@@ -1595,71 +1557,67 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
        if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;
 
-       stm32_fmc2_calc_timings(chip, sdrt);
-
-       /* Apply timings */
-       stm32_fmc2_timings_init(chip);
+       stm32_fmc2_nfc_calc_timings(chip, sdrt);
+       stm32_fmc2_nfc_timings_init(chip);
 
        return 0;
 }
 
-/* DMA configuration */
-static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
 {
        int ret = 0;
 
-       fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
-       if (IS_ERR(fmc2->dma_tx_ch)) {
-               ret = PTR_ERR(fmc2->dma_tx_ch);
+       nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
+       if (IS_ERR(nfc->dma_tx_ch)) {
+               ret = PTR_ERR(nfc->dma_tx_ch);
                if (ret != -ENODEV)
-                       dev_err(fmc2->dev,
+                       dev_err(nfc->dev,
                                "failed to request tx DMA channel: %d\n", ret);
-               fmc2->dma_tx_ch = NULL;
+               nfc->dma_tx_ch = NULL;
                goto err_dma;
        }
 
-       fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
-       if (IS_ERR(fmc2->dma_rx_ch)) {
-               ret = PTR_ERR(fmc2->dma_rx_ch);
+       nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
+       if (IS_ERR(nfc->dma_rx_ch)) {
+               ret = PTR_ERR(nfc->dma_rx_ch);
                if (ret != -ENODEV)
-                       dev_err(fmc2->dev,
+                       dev_err(nfc->dev,
                                "failed to request rx DMA channel: %d\n", ret);
-               fmc2->dma_rx_ch = NULL;
+               nfc->dma_rx_ch = NULL;
                goto err_dma;
        }
 
-       fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
-       if (IS_ERR(fmc2->dma_ecc_ch)) {
-               ret = PTR_ERR(fmc2->dma_ecc_ch);
+       nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
+       if (IS_ERR(nfc->dma_ecc_ch)) {
+               ret = PTR_ERR(nfc->dma_ecc_ch);
                if (ret != -ENODEV)
-                       dev_err(fmc2->dev,
+                       dev_err(nfc->dev,
                                "failed to request ecc DMA channel: %d\n", ret);
-               fmc2->dma_ecc_ch = NULL;
+               nfc->dma_ecc_ch = NULL;
                goto err_dma;
        }
 
-       ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+       ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
        if (ret)
                return ret;
 
        /* Allocate a buffer to store ECC status registers */
-       fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
-                                    GFP_KERNEL);
-       if (!fmc2->ecc_buf)
+       nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+       if (!nfc->ecc_buf)
                return -ENOMEM;
 
-       ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+       ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
        if (ret)
                return ret;
 
-       init_completion(&fmc2->dma_data_complete);
-       init_completion(&fmc2->dma_ecc_complete);
+       init_completion(&nfc->dma_data_complete);
+       init_completion(&nfc->dma_ecc_complete);
 
        return 0;
 
 err_dma:
        if (ret == -ENODEV) {
-               dev_warn(fmc2->dev,
+               dev_warn(nfc->dev,
                         "DMAs not defined in the DT, polling mode is used\n");
                ret = 0;
        }
@@ -1667,35 +1625,34 @@ err_dma:
        return ret;
 }
 
-/* NAND callbacks setup */
-static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 
        /*
         * Specific callbacks to read/write a page depending on
         * the mode (polling/sequencer) and the algo used (Hamming, BCH).
         */
-       if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
+       if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
                /* DMA => use sequencer mode callbacks */
-               chip->ecc.correct = stm32_fmc2_sequencer_correct;
-               chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
-               chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
-               chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
-               chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
+               chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
+               chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
+               chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
+               chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
+               chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
        } else {
                /* No DMA => use polling mode callbacks */
-               chip->ecc.hwctl = stm32_fmc2_hwctl;
+               chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
                if (chip->ecc.strength == FMC2_ECC_HAM) {
                        /* Hamming is used */
-                       chip->ecc.calculate = stm32_fmc2_ham_calculate;
-                       chip->ecc.correct = stm32_fmc2_ham_correct;
+                       chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+                       chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
                        chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
                } else {
                        /* BCH is used */
-                       chip->ecc.calculate = stm32_fmc2_bch_calculate;
-                       chip->ecc.correct = stm32_fmc2_bch_correct;
-                       chip->ecc.read_page = stm32_fmc2_read_page;
+                       chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+                       chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+                       chip->ecc.read_page = stm32_fmc2_nfc_read_page;
                }
        }
 
@@ -1708,9 +1665,8 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
                chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
 }
 
-/* FMC2 layout */
-static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
-                                        struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                       struct mtd_oob_region *oobregion)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1724,8 +1680,8 @@ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
        return 0;
 }
 
-static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
-                                         struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+                                        struct mtd_oob_region *oobregion)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1739,13 +1695,12 @@ static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
        return 0;
 }
 
-static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
-       .ecc = stm32_fmc2_nand_ooblayout_ecc,
-       .free = stm32_fmc2_nand_ooblayout_free,
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
+       .ecc = stm32_fmc2_nfc_ooblayout_ecc,
+       .free = stm32_fmc2_nfc_ooblayout_free,
 };
 
-/* FMC2 caps */
-static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
 {
        /* Hamming */
        if (strength == FMC2_ECC_HAM)
@@ -1759,14 +1714,13 @@ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
        return 8;
 }
 
-NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
                     FMC2_ECC_STEP_SIZE,
                     FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
 
-/* FMC2 controller ops */
-static int stm32_fmc2_attach_chip(struct nand_chip *chip)
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
 {
-       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;
 
@@ -1778,49 +1732,45 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip)
         * ECC sector size = 512
         */
        if (chip->ecc.mode != NAND_ECC_HW) {
-               dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
+               dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n");
                return -EINVAL;
        }
 
-       ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
+       ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
                                   mtd->oobsize - FMC2_BBM_LEN);
        if (ret) {
-               dev_err(fmc2->dev, "no valid ECC settings set\n");
+               dev_err(nfc->dev, "no valid ECC settings set\n");
                return ret;
        }
 
        if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
-               dev_err(fmc2->dev, "nand page size is not supported\n");
+               dev_err(nfc->dev, "nand page size is not supported\n");
                return -EINVAL;
        }
 
        if (chip->bbt_options & NAND_BBT_USE_FLASH)
                chip->bbt_options |= NAND_BBT_NO_OOB;
 
-       /* NAND callbacks setup */
-       stm32_fmc2_nand_callbacks_setup(chip);
+       stm32_fmc2_nfc_nand_callbacks_setup(chip);
 
-       /* Define ECC layout */
-       mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
+       mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
 
-       /* Configure bus width to 16-bit */
        if (chip->options & NAND_BUSWIDTH_16)
-               stm32_fmc2_set_buswidth_16(fmc2, true);
+               stm32_fmc2_nfc_set_buswidth_16(nfc, true);
 
        return 0;
 }
 
-static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
-       .attach_chip = stm32_fmc2_attach_chip,
-       .exec_op = stm32_fmc2_exec_op,
-       .setup_data_interface = stm32_fmc2_setup_interface,
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
+       .attach_chip = stm32_fmc2_nfc_attach_chip,
+       .exec_op = stm32_fmc2_nfc_exec_op,
+       .setup_data_interface = stm32_fmc2_nfc_setup_interface,
 };
 
-/* FMC2 probe */
-static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
-                                 struct device_node *dn)
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+                                     struct device_node *dn)
 {
-       struct stm32_fmc2_nand *nand = &fmc2->nand;
+       struct stm32_fmc2_nand *nand = &nfc->nand;
        u32 cs;
        int ret, i;
 
@@ -1829,29 +1779,29 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
 
        nand->ncs /= sizeof(u32);
        if (!nand->ncs) {
-               dev_err(fmc2->dev, "invalid reg property size\n");
+               dev_err(nfc->dev, "invalid reg property size\n");
                return -EINVAL;
        }
 
        for (i = 0; i < nand->ncs; i++) {
                ret = of_property_read_u32_index(dn, "reg", i, &cs);
                if (ret) {
-                       dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
+                       dev_err(nfc->dev, "could not retrieve reg property: %d\n",
                                ret);
                        return ret;
                }
 
                if (cs > FMC2_MAX_CE) {
-                       dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
+                       dev_err(nfc->dev, "invalid reg value: %d\n", cs);
                        return -EINVAL;
                }
 
-               if (fmc2->cs_assigned & BIT(cs)) {
-                       dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
+               if (nfc->cs_assigned & BIT(cs)) {
+                       dev_err(nfc->dev, "cs already assigned: %d\n", cs);
                        return -EINVAL;
                }
 
-               fmc2->cs_assigned |= BIT(cs);
+               nfc->cs_assigned |= BIT(cs);
                nand->cs_used[i] = cs;
        }
 
@@ -1860,25 +1810,25 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
        return 0;
 }
 
-static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
 {
-       struct device_node *dn = fmc2->dev->of_node;
+       struct device_node *dn = nfc->dev->of_node;
        struct device_node *child;
        int nchips = of_get_child_count(dn);
        int ret = 0;
 
        if (!nchips) {
-               dev_err(fmc2->dev, "NAND chip not defined\n");
+               dev_err(nfc->dev, "NAND chip not defined\n");
                return -EINVAL;
        }
 
        if (nchips > 1) {
-               dev_err(fmc2->dev, "too many NAND chips defined\n");
+               dev_err(nfc->dev, "too many NAND chips defined\n");
                return -EINVAL;
        }
 
        for_each_child_of_node(dn, child) {
-               ret = stm32_fmc2_parse_child(fmc2, child);
+               ret = stm32_fmc2_nfc_parse_child(nfc, child);
                if (ret < 0) {
                        of_node_put(child);
                        return ret;
@@ -1888,106 +1838,108 @@ static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
        return ret;
 }
 
-static int stm32_fmc2_probe(struct platform_device *pdev)
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct reset_control *rstc;
-       struct stm32_fmc2_nfc *fmc2;
+       struct stm32_fmc2_nfc *nfc;
        struct stm32_fmc2_nand *nand;
        struct resource *res;
        struct mtd_info *mtd;
        struct nand_chip *chip;
        int chip_cs, mem_region, ret, irq;
 
-       fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
-       if (!fmc2)
+       nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+       if (!nfc)
                return -ENOMEM;
 
-       fmc2->dev = dev;
-       nand_controller_init(&fmc2->base);
-       fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
+       nfc->dev = dev;
+       nand_controller_init(&nfc->base);
+       nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
 
-       ret = stm32_fmc2_parse_dt(fmc2);
+       ret = stm32_fmc2_nfc_parse_dt(nfc);
        if (ret)
                return ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       fmc2->io_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(fmc2->io_base))
-               return PTR_ERR(fmc2->io_base);
+       nfc->io_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(nfc->io_base))
+               return PTR_ERR(nfc->io_base);
 
-       fmc2->io_phys_addr = res->start;
+       nfc->io_phys_addr = res->start;
 
        for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
             chip_cs++, mem_region += 3) {
-               if (!(fmc2->cs_assigned & BIT(chip_cs)))
+               if (!(nfc->cs_assigned & BIT(chip_cs)))
                        continue;
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
-               fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
-               if (IS_ERR(fmc2->data_base[chip_cs]))
-                       return PTR_ERR(fmc2->data_base[chip_cs]);
+               nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+               if (IS_ERR(nfc->data_base[chip_cs]))
+                       return PTR_ERR(nfc->data_base[chip_cs]);
 
-               fmc2->data_phys_addr[chip_cs] = res->start;
+               nfc->data_phys_addr[chip_cs] = res->start;
 
                res = platform_get_resource(pdev, IORESOURCE_MEM,
                                            mem_region + 1);
-               fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
-               if (IS_ERR(fmc2->cmd_base[chip_cs]))
-                       return PTR_ERR(fmc2->cmd_base[chip_cs]);
+               nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+               if (IS_ERR(nfc->cmd_base[chip_cs]))
+                       return PTR_ERR(nfc->cmd_base[chip_cs]);
 
                res = platform_get_resource(pdev, IORESOURCE_MEM,
                                            mem_region + 2);
-               fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
-               if (IS_ERR(fmc2->addr_base[chip_cs]))
-                       return PTR_ERR(fmc2->addr_base[chip_cs]);
+               nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+               if (IS_ERR(nfc->addr_base[chip_cs]))
+                       return PTR_ERR(nfc->addr_base[chip_cs]);
        }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
 
-       ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
-                              dev_name(dev), fmc2);
+       ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
+                              dev_name(dev), nfc);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return ret;
        }
 
-       init_completion(&fmc2->complete);
+       init_completion(&nfc->complete);
 
-       fmc2->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(fmc2->clk))
-               return PTR_ERR(fmc2->clk);
+       nfc->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(nfc->clk))
+               return PTR_ERR(nfc->clk);
 
-       ret = clk_prepare_enable(fmc2->clk);
+       ret = clk_prepare_enable(nfc->clk);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                return ret;
        }
 
        rstc = devm_reset_control_get(dev, NULL);
-       if (!IS_ERR(rstc)) {
+       if (IS_ERR(rstc)) {
+               ret = PTR_ERR(rstc);
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk_disable;
+       } else {
                reset_control_assert(rstc);
                reset_control_deassert(rstc);
        }
 
-       /* DMA setup */
-       ret = stm32_fmc2_dma_setup(fmc2);
+       ret = stm32_fmc2_nfc_dma_setup(nfc);
        if (ret)
-               return ret;
+               goto err_release_dma;
 
-       /* FMC2 init routine */
-       stm32_fmc2_init(fmc2);
+       stm32_fmc2_nfc_init(nfc);
 
-       nand = &fmc2->nand;
+       nand = &nfc->nand;
        chip = &nand->chip;
        mtd = nand_to_mtd(chip);
        mtd->dev.parent = dev;
 
-       chip->controller = &fmc2->base;
+       chip->controller = &nfc->base;
        chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
-                        NAND_USE_BOUNCE_BUFFER;
+                        NAND_USES_DMA;
 
        /* Default ECC settings */
        chip->ecc.mode = NAND_ECC_HW;
@@ -1997,86 +1949,91 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
        /* Scan to find existence of the device */
        ret = nand_scan(chip, nand->ncs);
        if (ret)
-               goto err_scan;
+               goto err_release_dma;
 
        ret = mtd_device_register(mtd, NULL, 0);
        if (ret)
-               goto err_device_register;
+               goto err_nand_cleanup;
 
-       platform_set_drvdata(pdev, fmc2);
+       platform_set_drvdata(pdev, nfc);
 
        return 0;
 
-err_device_register:
+err_nand_cleanup:
        nand_cleanup(chip);
 
-err_scan:
-       if (fmc2->dma_ecc_ch)
-               dma_release_channel(fmc2->dma_ecc_ch);
-       if (fmc2->dma_tx_ch)
-               dma_release_channel(fmc2->dma_tx_ch);
-       if (fmc2->dma_rx_ch)
-               dma_release_channel(fmc2->dma_rx_ch);
+err_release_dma:
+       if (nfc->dma_ecc_ch)
+               dma_release_channel(nfc->dma_ecc_ch);
+       if (nfc->dma_tx_ch)
+               dma_release_channel(nfc->dma_tx_ch);
+       if (nfc->dma_rx_ch)
+               dma_release_channel(nfc->dma_rx_ch);
 
-       sg_free_table(&fmc2->dma_data_sg);
-       sg_free_table(&fmc2->dma_ecc_sg);
+       sg_free_table(&nfc->dma_data_sg);
+       sg_free_table(&nfc->dma_ecc_sg);
 
-       clk_disable_unprepare(fmc2->clk);
+err_clk_disable:
+       clk_disable_unprepare(nfc->clk);
 
        return ret;
 }
 
-static int stm32_fmc2_remove(struct platform_device *pdev)
+static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
 {
-       struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
-       struct stm32_fmc2_nand *nand = &fmc2->nand;
+       struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
+       struct stm32_fmc2_nand *nand = &nfc->nand;
+       struct nand_chip *chip = &nand->chip;
+       int ret;
 
-       nand_release(&nand->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
-       if (fmc2->dma_ecc_ch)
-               dma_release_channel(fmc2->dma_ecc_ch);
-       if (fmc2->dma_tx_ch)
-               dma_release_channel(fmc2->dma_tx_ch);
-       if (fmc2->dma_rx_ch)
-               dma_release_channel(fmc2->dma_rx_ch);
+       if (nfc->dma_ecc_ch)
+               dma_release_channel(nfc->dma_ecc_ch);
+       if (nfc->dma_tx_ch)
+               dma_release_channel(nfc->dma_tx_ch);
+       if (nfc->dma_rx_ch)
+               dma_release_channel(nfc->dma_rx_ch);
 
-       sg_free_table(&fmc2->dma_data_sg);
-       sg_free_table(&fmc2->dma_ecc_sg);
+       sg_free_table(&nfc->dma_data_sg);
+       sg_free_table(&nfc->dma_ecc_sg);
 
-       clk_disable_unprepare(fmc2->clk);
+       clk_disable_unprepare(nfc->clk);
 
        return 0;
 }
 
-static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
 {
-       struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+       struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(fmc2->clk);
+       clk_disable_unprepare(nfc->clk);
 
        pinctrl_pm_select_sleep_state(dev);
 
        return 0;
 }
 
-static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
 {
-       struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
-       struct stm32_fmc2_nand *nand = &fmc2->nand;
+       struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+       struct stm32_fmc2_nand *nand = &nfc->nand;
        int chip_cs, ret;
 
        pinctrl_pm_select_default_state(dev);
 
-       ret = clk_prepare_enable(fmc2->clk);
+       ret = clk_prepare_enable(nfc->clk);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                return ret;
        }
 
-       stm32_fmc2_init(fmc2);
+       stm32_fmc2_nfc_init(nfc);
 
        for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
-               if (!(fmc2->cs_assigned & BIT(chip_cs)))
+               if (!(nfc->cs_assigned & BIT(chip_cs)))
                        continue;
 
                nand_reset(&nand->chip, chip_cs);
@@ -2085,27 +2042,27 @@ static int __maybe_unused stm32_fmc2_resume(struct device *dev)
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
-                        stm32_fmc2_resume);
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+                        stm32_fmc2_nfc_resume);
 
-static const struct of_device_id stm32_fmc2_match[] = {
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
        {.compatible = "st,stm32mp15-fmc2"},
        {}
 };
-MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
 
-static struct platform_driver stm32_fmc2_driver = {
-       .probe  = stm32_fmc2_probe,
-       .remove = stm32_fmc2_remove,
+static struct platform_driver stm32_fmc2_nfc_driver = {
+       .probe  = stm32_fmc2_nfc_probe,
+       .remove = stm32_fmc2_nfc_remove,
        .driver = {
-               .name = "stm32_fmc2_nand",
-               .of_match_table = stm32_fmc2_match,
-               .pm = &stm32_fmc2_pm_ops,
+               .name = "stm32_fmc2_nfc",
+               .of_match_table = stm32_fmc2_nfc_match,
+               .pm = &stm32_fmc2_nfc_pm_ops,
        },
 };
-module_platform_driver(stm32_fmc2_driver);
+module_platform_driver(stm32_fmc2_nfc_driver);
 
-MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
 MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
 MODULE_LICENSE("GPL v2");
index 5f3e40b..ffbc165 100644 (file)
@@ -1698,7 +1698,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
                ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
                ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
-               nand->options |= NAND_USE_BOUNCE_BUFFER;
+               nand->options |= NAND_USES_DMA;
        } else {
                ecc->read_page = sunxi_nfc_hw_ecc_read_page;
                ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1907,7 +1907,8 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand,
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        const struct nand_op_parser *parser;
 
-       sunxi_nfc_select_chip(nand, op->cs);
+       if (!check_only)
+               sunxi_nfc_select_chip(nand, op->cs);
 
        if (sunxi_nand->sels[op->cs].rb >= 0)
                parser = &sunxi_nfc_op_parser;
@@ -2003,7 +2004,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(dev, "failed to register mtd device: %d\n", ret);
-               nand_release(nand);
+               nand_cleanup(nand);
                return ret;
        }
 
@@ -2038,13 +2039,18 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
 static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
 {
        struct sunxi_nand_chip *sunxi_nand;
+       struct nand_chip *chip;
+       int ret;
 
        while (!list_empty(&nfc->chips)) {
                sunxi_nand = list_first_entry(&nfc->chips,
                                              struct sunxi_nand_chip,
                                              node);
-               nand_release(&sunxi_nand->nand);
-               sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc);
+               chip = &sunxi_nand->nand;
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
+               sunxi_nand_ecc_cleanup(&chip->ecc);
                list_del(&sunxi_nand->node);
        }
 }
index 9acf2de..246871e 100644 (file)
@@ -568,7 +568,7 @@ static int chip_init(struct device *dev, struct device_node *np)
        chip->legacy.select_chip = tango_select_chip;
        chip->legacy.cmd_ctrl = tango_cmd_ctrl;
        chip->legacy.dev_ready = tango_dev_ready;
-       chip->options = NAND_USE_BOUNCE_BUFFER |
+       chip->options = NAND_USES_DMA |
                        NAND_NO_SUBPAGE_WRITE |
                        NAND_WAIT_TCCS;
        chip->controller = &nfc->hw;
@@ -600,14 +600,19 @@ static int chip_init(struct device *dev, struct device_node *np)
 
 static int tango_nand_remove(struct platform_device *pdev)
 {
-       int cs;
        struct tango_nfc *nfc = platform_get_drvdata(pdev);
+       struct nand_chip *chip;
+       int cs, ret;
 
        dma_release_channel(nfc->chan);
 
        for (cs = 0; cs < MAX_CS; ++cs) {
-               if (nfc->chips[cs])
-                       nand_release(&nfc->chips[cs]->nand_chip);
+               if (nfc->chips[cs]) {
+                       chip = &nfc->chips[cs]->nand_chip;
+                       ret = mtd_device_unregister(nand_to_mtd(chip));
+                       WARN_ON(ret);
+                       nand_cleanup(chip);
+               }
        }
 
        return 0;
index 3cc9a4c..f9d046b 100644 (file)
@@ -467,7 +467,9 @@ static int tegra_nand_exec_op(struct nand_chip *chip,
                              const struct nand_operation *op,
                              bool check_only)
 {
-       tegra_nand_select_target(chip, op->cs);
+       if (!check_only)
+               tegra_nand_select_target(chip, op->cs);
+
        return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
                                      check_only);
 }
@@ -1113,7 +1115,7 @@ static int tegra_nand_chips_init(struct device *dev,
        if (!mtd->name)
                mtd->name = "tegra_nand";
 
-       chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+       chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
 
        ret = nand_scan(chip, 1);
        if (ret)
index db030f1..843a868 100644 (file)
@@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev)
        if (!retval)
                return retval;
 
-       nand_release(nand_chip);
+       nand_cleanup(nand_chip);
 
 err_irq:
        tmio_hw_stop(dev, tmio);
@@ -458,8 +458,12 @@ err_irq:
 static int tmio_remove(struct platform_device *dev)
 {
        struct tmio_nand *tmio = platform_get_drvdata(dev);
+       struct nand_chip *chip = &tmio->chip;
+       int ret;
 
-       nand_release(&tmio->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        tmio_hw_stop(dev, tmio);
        return 0;
 }
index 2642d5b..47d9668 100644 (file)
@@ -371,7 +371,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
 static int __exit txx9ndfmc_remove(struct platform_device *dev)
 {
        struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-       int i;
+       int ret, i;
 
        if (!drvdata)
                return 0;
@@ -385,7 +385,9 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
                chip = mtd_to_nand(mtd);
                txx9_priv = nand_get_controller_data(chip);
 
-               nand_release(chip);
+               ret = mtd_device_unregister(nand_to_mtd(chip));
+               WARN_ON(ret);
+               nand_cleanup(chip);
                kfree(txx9_priv->mtdname);
                kfree(txx9_priv);
        }
index 6b399a7..7248c59 100644 (file)
@@ -502,7 +502,9 @@ static int vf610_nfc_exec_op(struct nand_chip *chip,
                             const struct nand_operation *op,
                             bool check_only)
 {
-       vf610_nfc_select_target(chip, op->cs);
+       if (!check_only)
+               vf610_nfc_select_target(chip, op->cs);
+
        return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
                                      check_only);
 }
@@ -915,8 +917,12 @@ err_disable_clk:
 static int vf610_nfc_remove(struct platform_device *pdev)
 {
        struct vf610_nfc *nfc = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &nfc->chip;
+       int ret;
 
-       nand_release(&nfc->chip);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
+       WARN_ON(ret);
+       nand_cleanup(chip);
        clk_disable_unprepare(nfc->clk);
        return 0;
 }
index 834f794..94bfba9 100644 (file)
@@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev)
 
        err = mtd_device_register(mtd, NULL, 0);
        if (err)
-               nand_release(&data->chip);
+               nand_cleanup(&data->chip);
 
        return err;
 }
@@ -221,8 +221,12 @@ static int xway_nand_probe(struct platform_device *pdev)
 static int xway_nand_remove(struct platform_device *pdev)
 {
        struct xway_nand_data *data = platform_get_drvdata(pdev);
+       struct nand_chip *chip = &data->chip;
+       int ret;
 
-       nand_release(&data->chip);
+       ret = mtd_device_unregister(mtd);
+       WARN_ON(ret);
+       nand_cleanup(chip);
 
        return 0;
 }
index c86f2db..a79e4d8 100644 (file)
@@ -9,7 +9,7 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@<offset>][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
  *              size is automatically truncated at end of device
@@ -92,7 +92,7 @@ static struct mtd_partition * newpart(char *s,
        int name_len;
        unsigned char *extra_mem;
        char delim;
-       unsigned int mask_flags;
+       unsigned int mask_flags, add_flags;
 
        /* fetch the partition size */
        if (*s == '-') {
@@ -109,6 +109,7 @@ static struct mtd_partition * newpart(char *s,
 
        /* fetch partition name and flags */
        mask_flags = 0; /* this is going to be a regular partition */
+       add_flags = 0;
        delim = 0;
 
        /* check for offset */
@@ -152,6 +153,12 @@ static struct mtd_partition * newpart(char *s,
                s += 2;
        }
 
+       /* if slc is found use emulated SLC mode on this partition*/
+       if (!strncmp(s, "slc", 3)) {
+               add_flags |= MTD_SLC_ON_MLC_EMULATION;
+               s += 3;
+       }
+
        /* test if more partitions are following */
        if (*s == ',') {
                if (size == SIZE_REMAINING) {
@@ -184,6 +191,7 @@ static struct mtd_partition * newpart(char *s,
        parts[this_part].size = size;
        parts[this_part].offset = offset;
        parts[this_part].mask_flags = mask_flags;
+       parts[this_part].add_flags = add_flags;
        if (name)
                strlcpy(extra_mem, name, name_len + 1);
        else
@@ -218,12 +226,29 @@ static int mtdpart_setup_real(char *s)
                struct cmdline_mtd_partition *this_mtd;
                struct mtd_partition *parts;
                int mtd_id_len, num_parts;
-               char *p, *mtd_id;
+               char *p, *mtd_id, *semicol;
+
+               /*
+                * Replace the first ';' by a NULL char so strrchr can work
+                * properly.
+                */
+               semicol = strchr(s, ';');
+               if (semicol)
+                       *semicol = '\0';
 
                mtd_id = s;
 
-               /* fetch <mtd-id> */
-               p = strchr(s, ':');
+               /*
+                * fetch <mtd-id>. We use strrchr to ignore all ':' that could
+                * be present in the MTD name, only the last one is interpreted
+                * as an <mtd-id>/<part-definition> separator.
+                */
+               p = strrchr(s, ':');
+
+               /* Restore the ';' now. */
+               if (semicol)
+                       *semicol = ';';
+
                if (!p) {
                        pr_err("no mtd-id\n");
                        return -EINVAL;
index 3caeabf..daf507c 100644 (file)
@@ -117,6 +117,9 @@ static int parse_fixed_partitions(struct mtd_info *master,
                if (of_get_property(pp, "lock", &len))
                        parts[i].mask_flags |= MTD_POWERUP_LOCK;
 
+               if (of_property_read_bool(pp, "slc-mode"))
+                       parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION;
+
                i++;
        }
 
index 6e816ea..ffc4b38 100644 (file)
@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig MTD_SPI_NOR
-       tristate "SPI-NOR device support"
+       tristate "SPI NOR device support"
        depends on MTD
        depends on MTD && SPI_MASTER
        select SPI_MEM
        help
          This is the framework for the SPI NOR which can be used by the SPI
-         device drivers and the SPI-NOR device driver.
+         device drivers and the SPI NOR device driver.
 
 if MTD_SPI_NOR
 
index 10b8666..d89a5ea 100644 (file)
@@ -21,11 +21,11 @@ config SPI_CADENCE_QUADSPI
          Flash as an MTD device.
 
 config SPI_HISI_SFC
-       tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
+       tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)"
        depends on ARCH_HISI || COMPILE_TEST
        depends on HAS_IOMEM
        help
-         This enables support for HiSilicon FMC SPI-NOR flash controller.
+         This enables support for HiSilicon FMC SPI NOR flash controller.
 
 config SPI_NXP_SPIFI
        tristate "NXP SPI Flash Interface (SPIFI)"
index ae85e4c..7225870 100644 (file)
@@ -727,7 +727,7 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
 
        /*
         * TODO: Adjust clocks if fast read is supported and interpret
-        * SPI-NOR flags to adjust controller settings.
+        * SPI NOR flags to adjust controller settings.
         */
        if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
                if (chip->nor.read_dummy == 0)
index 6c7a411..95c5021 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * HiSilicon FMC SPI-NOR flash controller driver
+ * HiSilicon FMC SPI NOR flash controller driver
  *
  * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
  */
index 9a5b1a7..5703e83 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ * SPI NOR driver for NXP SPI Flash Interface (SPIFI)
  *
  * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
  *
index cc68ea8..0369d98 100644 (file)
@@ -499,7 +499,7 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
  * the flash is ready for new commands.
  * @nor:       pointer to 'struct spi_nor'.
  *
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
  */
 static int spi_nor_xsr_ready(struct spi_nor *nor)
 {
@@ -542,7 +542,7 @@ static void spi_nor_clear_sr(struct spi_nor *nor)
  * for new commands.
  * @nor:       pointer to 'struct spi_nor'.
  *
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
  */
 static int spi_nor_sr_ready(struct spi_nor *nor)
 {
@@ -606,7 +606,7 @@ static void spi_nor_clear_fsr(struct spi_nor *nor)
  * ready for new commands.
  * @nor:       pointer to 'struct spi_nor'.
  *
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
  */
 static int spi_nor_fsr_ready(struct spi_nor *nor)
 {
@@ -640,14 +640,14 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
                return -EIO;
        }
 
-       return nor->bouncebuf[0] & FSR_READY;
+       return !!(nor->bouncebuf[0] & FSR_READY);
 }
 
 /**
  * spi_nor_ready() - Query the flash to see if it is ready for new commands.
  * @nor:       pointer to 'struct spi_nor'.
  *
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
  */
 static int spi_nor_ready(struct spi_nor *nor)
 {
@@ -2469,7 +2469,7 @@ static int spi_nor_select_read(struct spi_nor *nor,
        nor->read_proto = read->proto;
 
        /*
-        * In the spi-nor framework, we don't need to make the difference
+        * In the SPI NOR framework, we don't need to make the difference
         * between mode clock cycles and wait state clock cycles.
         * Indeed, the value of the mode clock cycles is used by a QSPI
         * flash memory to know whether it should enter or leave its 0-4-4
@@ -2675,7 +2675,7 @@ static int spi_nor_setup(struct spi_nor *nor,
 /**
  * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
  * settings based on MFR register and ->default_init() hook.
- * @nor:       pointer to a 'struct spi-nor'.
+ * @nor:       pointer to a 'struct spi_nor'.
  */
 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
 {
@@ -2690,7 +2690,7 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
 /**
  * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
  * based on JESD216 SFDP standard.
- * @nor:       pointer to a 'struct spi-nor'.
+ * @nor:       pointer to a 'struct spi_nor'.
  *
  * The method has a roll-back mechanism: in case the SFDP parsing fails, the
  * legacy flash parameters and settings will be restored.
@@ -2712,7 +2712,7 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
 /**
  * spi_nor_info_init_params() - Initialize the flash's parameters and settings
  * based on nor->info data.
- * @nor:       pointer to a 'struct spi-nor'.
+ * @nor:       pointer to a 'struct spi_nor'.
  */
 static void spi_nor_info_init_params(struct spi_nor *nor)
 {
@@ -2841,7 +2841,7 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
 
 /**
  * spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor:       pointer to a 'struct spi-nor'.
+ * @nor:       pointer to a 'struct spi_nor'.
  *
  * The flash parameters and settings are initialized based on a sequence of
  * calls that are ordered by priority:
@@ -3126,7 +3126,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
        /*
         * Make sure the XSR_RDY flag is set before calling
         * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
-        * with Atmel spi-nor
+        * with Atmel SPI NOR.
         */
        if (info->flags & SPI_NOR_XSR_RDY)
                nor->flags |=  SNOR_F_READY_XSR_RDY;
index ab0f963..96735d8 100644 (file)
@@ -63,10 +63,16 @@ static const struct flash_info macronix_parts[] = {
                .fixups = &mx25l25635_fixups },
        { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
                              SECT_4K | SPI_NOR_4B_OPCODES) },
+       { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
+                             SECT_4K | SPI_NOR_DUAL_READ |
+                             SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
        { "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
                              SECT_4K | SPI_NOR_DUAL_READ |
                              SPI_NOR_QUAD_READ) },
        { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+       { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
+                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                             SPI_NOR_4B_OPCODES) },
        { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
                              SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                              SPI_NOR_4B_OPCODES) },
index 6c034b9..3dca5b9 100644 (file)
@@ -29,7 +29,9 @@ static const struct flash_info st_parts[] = {
        { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128,
                              SECT_4K | SPI_NOR_QUAD_READ) },
        { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256,
-                             SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+                             SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+                             SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+                             SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
        { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256,
                              SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
        { "mt25ql256a",  INFO6(0x20ba19, 0x104400, 64 * 1024,  512,
@@ -59,6 +61,8 @@ static const struct flash_info st_parts[] = {
                              SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
        { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048,
                              SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+                             SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+                             SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
                              NO_CHIP_ERASE) },
        { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048,
                              SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
index f6038d3..55c0c50 100644 (file)
 #define SFDP_4BAIT_ID          0xff84  /* 4-byte Address Instruction Table */
 
 #define SFDP_SIGNATURE         0x50444653U
-#define SFDP_JESD216_MAJOR     1
-#define SFDP_JESD216_MINOR     0
-#define SFDP_JESD216A_MINOR    5
-#define SFDP_JESD216B_MINOR    6
 
 struct sfdp_header {
        u32             signature; /* Ox50444653U <=> "SFDP" */
@@ -437,7 +433,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
        struct sfdp_bfpt bfpt;
        size_t len;
        int i, cmd, err;
-       u32 addr;
+       u32 addr, val;
        u16 half;
        u8 erase_mask;
 
@@ -460,6 +456,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
        /* Number of address bytes. */
        switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
        case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+       case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
                nor->addr_width = 3;
                break;
 
@@ -472,21 +469,21 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
        }
 
        /* Flash Memory Density (in bits). */
-       params->size = bfpt.dwords[BFPT_DWORD(2)];
-       if (params->size & BIT(31)) {
-               params->size &= ~BIT(31);
+       val = bfpt.dwords[BFPT_DWORD(2)];
+       if (val & BIT(31)) {
+               val &= ~BIT(31);
 
                /*
                 * Prevent overflows on params->size. Anyway, a NOR of 2^64
                 * bits is unlikely to exist so this error probably means
                 * the BFPT we are reading is corrupted/wrong.
                 */
-               if (params->size > 63)
+               if (val > 63)
                        return -EINVAL;
 
-               params->size = 1ULL << params->size;
+               params->size = 1ULL << val;
        } else {
-               params->size++;
+               params->size = val + 1;
        }
        params->size >>= 3; /* Convert to bytes. */
 
@@ -548,15 +545,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
                                  SNOR_ERASE_TYPE_MASK;
 
        /* Stop here if not JESD216 rev A or later. */
-       if (bfpt_header->length < BFPT_DWORD_MAX)
+       if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
                return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
                                                params);
 
        /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
-       params->page_size = bfpt.dwords[BFPT_DWORD(11)];
-       params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
-       params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
-       params->page_size = 1U << params->page_size;
+       val = bfpt.dwords[BFPT_DWORD(11)];
+       val &= BFPT_DWORD11_PAGE_SIZE_MASK;
+       val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+       params->page_size = 1U << val;
 
        /* Quad Enable Requirements. */
        switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
@@ -604,6 +601,11 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
                return -EINVAL;
        }
 
+       /* Stop here if not JESD216 rev C or later. */
+       if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
+               return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+                                               params);
+
        return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
 }
 
index e0a8ded..7f9846b 100644 (file)
@@ -7,14 +7,20 @@
 #ifndef __LINUX_MTD_SFDP_H
 #define __LINUX_MTD_SFDP_H
 
+/* SFDP revisions */
+#define SFDP_JESD216_MAJOR     1
+#define SFDP_JESD216_MINOR     0
+#define SFDP_JESD216A_MINOR    5
+#define SFDP_JESD216B_MINOR    6
+
 /* Basic Flash Parameter Table */
 
 /*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs.
  * They are indexed from 1 but C arrays are indexed from 0.
  */
 #define BFPT_DWORD(i)          ((i) - 1)
-#define BFPT_DWORD_MAX         16
+#define BFPT_DWORD_MAX         20
 
 struct sfdp_bfpt {
        u32     dwords[BFPT_DWORD_MAX];
@@ -22,6 +28,7 @@ struct sfdp_bfpt {
 
 /* The first version of JESD216 defined only 9 DWORDs. */
 #define BFPT_DWORD_MAX_JESD216                 9
+#define BFPT_DWORD_MAX_JESD216B                        16
 
 /* 1st DWORD. */
 #define BFPT_DWORD1_FAST_READ_1_1_2            BIT(16)
index 6756202..e550cd5 100644 (file)
@@ -8,6 +8,27 @@
 
 #include "core.h"
 
+static int
+s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
+                        const struct sfdp_parameter_header *bfpt_header,
+                        const struct sfdp_bfpt *bfpt,
+                        struct spi_nor_flash_parameter *params)
+{
+       /*
+        * The S25FS-S chip family reports 512-byte pages in BFPT but
+        * in reality the write buffer still wraps at the safe default
+        * of 256 bytes.  Overwrite the page size advertised by BFPT
+        * to get the writes working.
+        */
+       params->page_size = 256;
+
+       return 0;
+}
+
+static struct spi_nor_fixups s25fs_s_fixups = {
+       .post_bfpt = s25fs_s_post_bfpt_fixups,
+};
+
 static const struct flash_info spansion_parts[] = {
        /* Spansion/Cypress -- single (large) sector size only, at least
         * for the chips listed here (without boot sectors).
@@ -22,16 +43,27 @@ static const struct flash_info spansion_parts[] = {
        { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
                              SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                              USE_CLSR) },
-       { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
-       { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512,
-                            SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-                            USE_CLSR) },
+       { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
+                             SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                             USE_CLSR) },
+       { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
+                             SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                             USE_CLSR) },
        { "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
                              SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                              SPI_NOR_HAS_LOCK | USE_CLSR) },
-       { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+       { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
+                             SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+         .fixups = &s25fs_s_fixups, },
+       { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
+                             SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                             USE_CLSR) },
+       { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
                              SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                              USE_CLSR) },
+       { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+                             SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+         .fixups = &s25fs_s_fixups, },
        { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
        { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
        { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
@@ -70,6 +102,8 @@ static const struct flash_info spansion_parts[] = {
        { "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512,
                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                             SPI_NOR_4B_OPCODES) },
+       { "cy15x104q",  INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
+                             SPI_NOR_NO_ERASE) },
 };
 
 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
index 17deaba..5062af1 100644 (file)
@@ -8,6 +8,31 @@
 
 #include "core.h"
 
+static int
+w25q256_post_bfpt_fixups(struct spi_nor *nor,
+                        const struct sfdp_parameter_header *bfpt_header,
+                        const struct sfdp_bfpt *bfpt,
+                        struct spi_nor_flash_parameter *params)
+{
+       /*
+        * W25Q256JV supports 4B opcodes but W25Q256FV does not.
+        * Unfortunately, Winbond has re-used the same JEDEC ID for both
+        * variants which prevents us from defining a new entry in the parts
+        * table.
+        * To differentiate between W25Q256JV and W25Q256FV check SFDP header
+        * version: only JV has JESD216A compliant structure (version 5).
+        */
+       if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+           bfpt_header->minor == SFDP_JESD216A_MINOR)
+               nor->flags |= SNOR_F_4B_OPCODES;
+
+       return 0;
+}
+
+static struct spi_nor_fixups w25q256_fixups = {
+       .post_bfpt = w25q256_post_bfpt_fixups,
+};
+
 static const struct flash_info winbond_parts[] = {
        /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
        { "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
@@ -53,8 +78,8 @@ static const struct flash_info winbond_parts[] = {
        { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
        { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
        { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
-                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-                         SPI_NOR_4B_OPCODES) },
+                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+         .fixups = &w25q256_fixups },
        { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
index 12c0234..e85b04e 100644 (file)
@@ -867,8 +867,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
         * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
         * MLC NAND is different and needs special care, otherwise UBI or UBIFS
         * will die soon and you will lose all your data.
+        * Relax this rule if the partition we're attaching to operates in SLC
+        * mode.
         */
-       if (mtd->type == MTD_MLCNANDFLASH) {
+       if (mtd->type == MTD_MLCNANDFLASH &&
+           !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
                pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
                        mtd->index);
                return -EINVAL;
index b486250..83afc00 100644 (file)
@@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi)
        wl_pool->size = 0;
        pool->size = 0;
 
+       if (ubi->fm_anchor) {
+               wl_tree_add(ubi->fm_anchor, &ubi->free);
+               ubi->free_count++;
+       }
+       if (ubi->fm_next_anchor) {
+               wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+               ubi->free_count++;
+       }
+
+       /* All available PEBs are in ubi->free, now is the time to get
+        * the best anchor PEBs.
+        */
+       ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+       ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
@@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
        struct ubi_work *wrk;
-       struct ubi_wl_entry *anchor;
 
        spin_lock(&ubi->wl_lock);
 
-       /* Do we already have an anchor? */
-       if (ubi->fm_anchor) {
-               spin_unlock(&ubi->wl_lock);
-               return 0;
-       }
-
-       /* See if we can find an anchor PEB on the list of free PEBs */
-       anchor = ubi_wl_get_fm_peb(ubi, 1);
-       if (anchor) {
-               ubi->fm_anchor = anchor;
-               spin_unlock(&ubi->wl_lock);
-               return 0;
+       /* Do we have a next anchor? */
+       if (!ubi->fm_next_anchor) {
+               ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+               if (!ubi->fm_next_anchor)
+                       /* Tell wear leveling to produce a new anchor PEB */
+                       ubi->fm_do_produce_anchor = 1;
        }
 
-       /* No luck, trigger wear leveling to produce a new anchor PEB */
-       ubi->fm_do_produce_anchor = 1;
+       /* Do wear leveling to get a new anchor PEB or check the
+        * existing next anchor candidate.
+        */
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
index 53f448e..022af59 100644 (file)
@@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
                fm_pos += sizeof(*fec);
                ubi_assert(fm_pos <= ubi->fm_size);
        }
+       if (ubi->fm_next_anchor) {
+               fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+               fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+               set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+               fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+               free_peb_count++;
+               fm_pos += sizeof(*fec);
+               ubi_assert(fm_pos <= ubi->fm_size);
+       }
        fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
        ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
index da0bee1..c2da771 100644 (file)
@@ -491,7 +491,8 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -602,6 +603,7 @@ struct ubi_device {
        int fm_work_scheduled;
        int fast_attach;
        struct ubi_wl_entry *fm_anchor;
+       struct ubi_wl_entry *fm_next_anchor;
        int fm_do_produce_anchor;
 
        /* Wear-leveling sub-system's stuff */
index 5146cce..2763606 100644 (file)
@@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
        }
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+       e1 = find_anchor_wl_entry(&ubi->used);
+       if (e1 && ubi->fm_next_anchor &&
+           (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+               ubi->fm_do_produce_anchor = 1;
+               /* fm_next_anchor is no longer considered a good anchor
+                * candidate.
+                * NULL assignment also prevents multiple wear level checks
+                * of this PEB.
+                */
+               wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+               ubi->fm_next_anchor = NULL;
+               ubi->free_count++;
+       }
+
        if (ubi->fm_do_produce_anchor) {
-               e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;
 
-               /*
-                * Anchor move within the anchor area is useless.
-                */
-               if (e2->pnum < UBI_FM_MAX_START)
-                       goto out_cancel;
-
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
@@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
        if (!err) {
                spin_lock(&ubi->wl_lock);
 
-               if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
-                       ubi->fm_anchor = e;
+               if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+                       /* Abort anchor production, if needed it will be
+                        * enabled again in the wear leveling started below.
+                        */
+                       ubi->fm_next_anchor = e;
                        ubi->fm_do_produce_anchor = 0;
                } else {
                        wl_tree_add(e, &ubi->free);
index 1538ad1..1f2f2e8 100644 (file)
@@ -50,7 +50,7 @@ config CAIF_HSI
 
 config CAIF_VIRTIO
        tristate "CAIF virtio transport driver"
-       depends on CAIF && HAS_DMA && VHOST_DPN
+       depends on CAIF && HAS_DMA
        select VHOST_RING
        select VIRTIO
        select GENERIC_ALLOCATOR
index e4f01e7..dfbd3d1 100644 (file)
@@ -664,7 +664,7 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
        return ret;
 }
 
-static int _generic_set_opp_regulator(const struct opp_table *opp_table,
+static int _generic_set_opp_regulator(struct opp_table *opp_table,
                                      struct device *dev,
                                      unsigned long old_freq,
                                      unsigned long freq,
@@ -699,6 +699,18 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
                        goto restore_freq;
        }
 
+       /*
+        * Enable the regulator after setting its voltages, otherwise it breaks
+        * some boot-enabled regulators.
+        */
+       if (unlikely(!opp_table->regulator_enabled)) {
+               ret = regulator_enable(reg);
+               if (ret < 0)
+                       dev_warn(dev, "Failed to enable regulator: %d", ret);
+               else
+                       opp_table->regulator_enabled = true;
+       }
+
        return 0;
 
 restore_freq:
@@ -713,6 +725,34 @@ restore_voltage:
        return ret;
 }
 
+static int _set_opp_bw(const struct opp_table *opp_table,
+                      struct dev_pm_opp *opp, struct device *dev, bool remove)
+{
+       u32 avg, peak;
+       int i, ret;
+
+       if (!opp_table->paths)
+               return 0;
+
+       for (i = 0; i < opp_table->path_count; i++) {
+               if (remove) {
+                       avg = 0;
+                       peak = 0;
+               } else {
+                       avg = opp->bandwidth[i].avg;
+                       peak = opp->bandwidth[i].peak;
+               }
+               ret = icc_set_bw(opp_table->paths[i], avg, peak);
+               if (ret) {
+                       dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
+                               remove ? "remove" : "set", i, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int _set_opp_custom(const struct opp_table *opp_table,
                           struct device *dev, unsigned long old_freq,
                           unsigned long freq,
@@ -817,15 +857,31 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
        }
 
        if (unlikely(!target_freq)) {
-               if (opp_table->required_opp_tables) {
-                       ret = _set_required_opps(dev, opp_table, NULL);
-               } else if (!_get_opp_count(opp_table)) {
+               /*
+                * Some drivers need to support cases where some platforms may
+                * have OPP table for the device, while others don't and
+                * opp_set_rate() just needs to behave like clk_set_rate().
+                */
+               if (!_get_opp_count(opp_table))
                        return 0;
-               } else {
+
+               if (!opp_table->required_opp_tables && !opp_table->regulators &&
+                   !opp_table->paths) {
                        dev_err(dev, "target frequency can't be 0\n");
                        ret = -EINVAL;
+                       goto put_opp_table;
+               }
+
+               ret = _set_opp_bw(opp_table, NULL, dev, true);
+               if (ret)
+                       return ret;
+
+               if (opp_table->regulator_enabled) {
+                       regulator_disable(opp_table->regulators[0]);
+                       opp_table->regulator_enabled = false;
                }
 
+               ret = _set_required_opps(dev, opp_table, NULL);
                goto put_opp_table;
        }
 
@@ -909,6 +965,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
                        dev_err(dev, "Failed to set required opps: %d\n", ret);
        }
 
+       if (!ret)
+               ret = _set_opp_bw(opp_table, opp, dev, false);
+
 put_opp:
        dev_pm_opp_put(opp);
 put_old_opp:
@@ -999,6 +1058,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
                                ret);
        }
 
+       /* Find interconnect path(s) for the device */
+       ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
+       if (ret)
+               dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
+                        __func__, ret);
+
        BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
        INIT_LIST_HEAD(&opp_table->opp_list);
        kref_init(&opp_table->kref);
@@ -1057,6 +1122,7 @@ static void _opp_table_kref_release(struct kref *kref)
 {
        struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
        struct opp_device *opp_dev, *temp;
+       int i;
 
        _of_clear_opp_table(opp_table);
 
@@ -1064,6 +1130,12 @@ static void _opp_table_kref_release(struct kref *kref)
        if (!IS_ERR(opp_table->clk))
                clk_put(opp_table->clk);
 
+       if (opp_table->paths) {
+               for (i = 0; i < opp_table->path_count; i++)
+                       icc_put(opp_table->paths[i]);
+               kfree(opp_table->paths);
+       }
+
        WARN_ON(!list_empty(&opp_table->opp_list));
 
        list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
@@ -1243,19 +1315,23 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
 struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 {
        struct dev_pm_opp *opp;
-       int count, supply_size;
+       int supply_count, supply_size, icc_size;
 
        /* Allocate space for at least one supply */
-       count = table->regulator_count > 0 ? table->regulator_count : 1;
-       supply_size = sizeof(*opp->supplies) * count;
+       supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+       supply_size = sizeof(*opp->supplies) * supply_count;
+       icc_size = sizeof(*opp->bandwidth) * table->path_count;
 
        /* allocate new OPP node and supplies structures */
-       opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+       opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
+
        if (!opp)
                return NULL;
 
        /* Put the supplies at the end of the OPP structure as an empty array */
        opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+       if (icc_size)
+               opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
        INIT_LIST_HEAD(&opp->node);
 
        return opp;
@@ -1286,11 +1362,24 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
        return true;
 }
 
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+       if (opp1->rate != opp2->rate)
+               return opp1->rate < opp2->rate ? -1 : 1;
+       if (opp1->bandwidth && opp2->bandwidth &&
+           opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
+               return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+       if (opp1->level != opp2->level)
+               return opp1->level < opp2->level ? -1 : 1;
+       return 0;
+}
+
 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
                             struct opp_table *opp_table,
                             struct list_head **head)
 {
        struct dev_pm_opp *opp;
+       int opp_cmp;
 
        /*
         * Insert new OPP in order of increasing frequency and discard if
@@ -1301,12 +1390,13 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
         * loop.
         */
        list_for_each_entry(opp, &opp_table->opp_list, node) {
-               if (new_opp->rate > opp->rate) {
+               opp_cmp = _opp_compare_key(new_opp, opp);
+               if (opp_cmp > 0) {
                        *head = &opp->node;
                        continue;
                }
 
-               if (new_opp->rate < opp->rate)
+               if (opp_cmp < 0)
                        return 0;
 
                /* Duplicate OPPs */
@@ -1670,6 +1760,13 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
+       if (opp_table->regulator_enabled) {
+               for (i = opp_table->regulator_count - 1; i >= 0; i--)
+                       regulator_disable(opp_table->regulators[i]);
+
+               opp_table->regulator_enabled = false;
+       }
+
        for (i = opp_table->regulator_count - 1; i >= 0; i--)
                regulator_put(opp_table->regulators[i]);
 
index 609665e..596c185 100644 (file)
@@ -32,6 +32,47 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
        debugfs_remove_recursive(opp->dentry);
 }
 
+static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
+                           size_t count, loff_t *ppos)
+{
+       struct icc_path *path = fp->private_data;
+       char buf[64];
+       int i;
+
+       i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+
+       return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations bw_name_fops = {
+       .open = simple_open,
+       .read = bw_name_read,
+       .llseek = default_llseek,
+};
+
+static void opp_debug_create_bw(struct dev_pm_opp *opp,
+                               struct opp_table *opp_table,
+                               struct dentry *pdentry)
+{
+       struct dentry *d;
+       char name[11];
+       int i;
+
+       for (i = 0; i < opp_table->path_count; i++) {
+               snprintf(name, sizeof(name), "icc-path-%.1d", i);
+
+               /* Create per-path directory */
+               d = debugfs_create_dir(name, pdentry);
+
+               debugfs_create_file("name", S_IRUGO, d, opp_table->paths[i],
+                                   &bw_name_fops);
+               debugfs_create_u32("peak_bw", S_IRUGO, d,
+                                  &opp->bandwidth[i].peak);
+               debugfs_create_u32("avg_bw", S_IRUGO, d,
+                                  &opp->bandwidth[i].avg);
+       }
+}
+
 static void opp_debug_create_supplies(struct dev_pm_opp *opp,
                                      struct opp_table *opp_table,
                                      struct dentry *pdentry)
@@ -94,6 +135,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
                             &opp->clock_latency_ns);
 
        opp_debug_create_supplies(opp, opp_table, d);
+       opp_debug_create_bw(opp, opp_table, d);
 
        opp->dentry = d;
 }
index 9cd8f0a..9a58735 100644 (file)
@@ -332,6 +332,105 @@ free_required_opps:
        return ret;
 }
 
+static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
+{
+       struct device_node *np, *opp_np;
+       struct property *prop;
+
+       if (!opp_table) {
+               np = of_node_get(dev->of_node);
+               if (!np)
+                       return -ENODEV;
+
+               opp_np = _opp_of_get_opp_desc_node(np, 0);
+               of_node_put(np);
+       } else {
+               opp_np = of_node_get(opp_table->np);
+       }
+
+       /* Lets not fail in case we are parsing opp-v1 bindings */
+       if (!opp_np)
+               return 0;
+
+       /* Checking only first OPP is sufficient */
+       np = of_get_next_available_child(opp_np, NULL);
+       if (!np) {
+               dev_err(dev, "OPP table empty\n");
+               return -EINVAL;
+       }
+       of_node_put(opp_np);
+
+       prop = of_find_property(np, "opp-peak-kBps", NULL);
+       of_node_put(np);
+
+       if (!prop || !prop->length)
+               return 0;
+
+       return 1;
+}
+
+int dev_pm_opp_of_find_icc_paths(struct device *dev,
+                                struct opp_table *opp_table)
+{
+       struct device_node *np;
+       int ret, i, count, num_paths;
+       struct icc_path **paths;
+
+       ret = _bandwidth_supported(dev, opp_table);
+       if (ret <= 0)
+               return ret;
+
+       ret = 0;
+
+       np = of_node_get(dev->of_node);
+       if (!np)
+               return 0;
+
+       count = of_count_phandle_with_args(np, "interconnects",
+                                          "#interconnect-cells");
+       of_node_put(np);
+       if (count < 0)
+               return 0;
+
+       /* two phandles when #interconnect-cells = <1> */
+       if (count % 2) {
+               dev_err(dev, "%s: Invalid interconnects values\n", __func__);
+               return -EINVAL;
+       }
+
+       num_paths = count / 2;
+       paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
+       if (!paths)
+               return -ENOMEM;
+
+       for (i = 0; i < num_paths; i++) {
+               paths[i] = of_icc_get_by_index(dev, i);
+               if (IS_ERR(paths[i])) {
+                       ret = PTR_ERR(paths[i]);
+                       if (ret != -EPROBE_DEFER) {
+                               dev_err(dev, "%s: Unable to get path%d: %d\n",
+                                       __func__, i, ret);
+                       }
+                       goto err;
+               }
+       }
+
+       if (opp_table) {
+               opp_table->paths = paths;
+               opp_table->path_count = num_paths;
+               return 0;
+       }
+
+err:
+       while (i--)
+               icc_put(paths[i]);
+
+       kfree(paths);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
+
 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
                              struct device_node *np)
 {
@@ -521,6 +620,90 @@ void dev_pm_opp_of_remove_table(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
 
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+                   struct device_node *np, bool peak)
+{
+       const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
+       struct property *prop;
+       int i, count, ret;
+       u32 *bw;
+
+       prop = of_find_property(np, name, NULL);
+       if (!prop)
+               return -ENODEV;
+
+       count = prop->length / sizeof(u32);
+       if (table->path_count != count) {
+               pr_err("%s: Mismatch between %s and paths (%d %d)\n",
+                               __func__, name, count, table->path_count);
+               return -EINVAL;
+       }
+
+       bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
+       if (!bw)
+               return -ENOMEM;
+
+       ret = of_property_read_u32_array(np, name, bw, count);
+       if (ret) {
+               pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
+               goto out;
+       }
+
+       for (i = 0; i < count; i++) {
+               if (peak)
+                       new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
+               else
+                       new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
+       }
+
+out:
+       kfree(bw);
+       return ret;
+}
+
+static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
+                        struct device_node *np, bool *rate_not_available)
+{
+       bool found = false;
+       u64 rate;
+       int ret;
+
+       ret = of_property_read_u64(np, "opp-hz", &rate);
+       if (!ret) {
+               /*
+                * Rate is defined as an unsigned long in clk API, and so
+                * casting explicitly to its type. Must be fixed once rate is 64
+                * bit guaranteed in clk API.
+                */
+               new_opp->rate = (unsigned long)rate;
+               found = true;
+       }
+       *rate_not_available = !!ret;
+
+       /*
+        * Bandwidth consists of peak and average (optional) values:
+        * opp-peak-kBps = <path1_value path2_value>;
+        * opp-avg-kBps = <path1_value path2_value>;
+        */
+       ret = _read_bw(new_opp, table, np, true);
+       if (!ret) {
+               found = true;
+               ret = _read_bw(new_opp, table, np, false);
+       }
+
+       /* The properties were found but we failed to parse them */
+       if (ret && ret != -ENODEV)
+               return ret;
+
+       if (!of_property_read_u32(np, "opp-level", &new_opp->level))
+               found = true;
+
+       if (found)
+               return 0;
+
+       return ret;
+}
+
 /**
  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  * @opp_table: OPP table
@@ -558,26 +741,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
        if (!new_opp)
                return ERR_PTR(-ENOMEM);
 
-       ret = of_property_read_u64(np, "opp-hz", &rate);
-       if (ret < 0) {
-               /* "opp-hz" is optional for devices like power domains. */
-               if (!opp_table->is_genpd) {
-                       dev_err(dev, "%s: opp-hz not found\n", __func__);
-                       goto free_opp;
-               }
-
-               rate_not_available = true;
-       } else {
-               /*
-                * Rate is defined as an unsigned long in clk API, and so
-                * casting explicitly to its type. Must be fixed once rate is 64
-                * bit guaranteed in clk API.
-                */
-               new_opp->rate = (unsigned long)rate;
+       ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+       if (ret < 0 && !opp_table->is_genpd) {
+               dev_err(dev, "%s: opp key field not found\n", __func__);
+               goto free_opp;
        }
 
-       of_property_read_u32(np, "opp-level", &new_opp->level);
-
        /* Check if the OPP supports hardware's hierarchy of versions or not */
        if (!_opp_is_supported(dev, opp_table, np)) {
                dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
index d14e271..e51646f 100644 (file)
@@ -12,6 +12,7 @@
 #define __DRIVER_OPP_H__
 
 #include <linux/device.h>
+#include <linux/interconnect.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
@@ -59,6 +60,7 @@ extern struct list_head opp_tables;
  * @rate:      Frequency in hertz
  * @level:     Performance level
  * @supplies:  Power supplies voltage/current values
+ * @bandwidth: Interconnect bandwidth values
  * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  *             frequency from any other OPP's frequency.
  * @required_opps: List of OPPs that are required by this OPP.
@@ -81,6 +83,7 @@ struct dev_pm_opp {
        unsigned int level;
 
        struct dev_pm_opp_supply *supplies;
+       struct dev_pm_opp_icc_bw *bandwidth;
 
        unsigned long clock_latency_ns;
 
@@ -144,8 +147,11 @@ enum opp_table_access {
  * @clk: Device's clock handle
  * @regulators: Supply regulators
  * @regulator_count: Number of power supply regulators. Its value can be -1
+ * @regulator_enabled: Set to true if regulators were previously enabled.
  * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
  * property).
+ * @paths: Interconnect path handles
+ * @path_count: Number of interconnect paths
  * @genpd_performance_state: Device's power domain support performance state.
  * @is_genpd: Marks if the OPP table belongs to a genpd.
  * @set_opp: Platform specific set_opp callback
@@ -189,6 +195,9 @@ struct opp_table {
        struct clk *clk;
        struct regulator **regulators;
        int regulator_count;
+       bool regulator_enabled;
+       struct icc_path **paths;
+       unsigned int path_count;
        bool genpd_performance_state;
        bool is_genpd;
 
@@ -211,6 +220,7 @@ struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_
 void _dev_pm_opp_find_and_remove_table(struct device *dev);
 struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
 void _opp_free(struct dev_pm_opp *opp);
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
index 8dd1278..7719ae4 100644 (file)
@@ -35,7 +35,7 @@
 /* L3C has 8-counters */
 #define L3C_NR_COUNTERS                0x8
 
-#define L3C_PERF_CTRL_EN       0x20000
+#define L3C_PERF_CTRL_EN       0x10000
 #define L3C_EVTYPE_NONE                0xff
 
 /*
index e814006..3e1ceb8 100644 (file)
@@ -10,7 +10,7 @@ if VDPA
 
 config VDPA_SIM
        tristate "vDPA device simulator"
-       depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN
+       depends on RUNTIME_TESTING_MENU && HAS_DMA
        select VHOST_RING
        default n
        help
index e24371d..94bf032 100644 (file)
@@ -185,6 +185,9 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 
 void ifcvf_reset(struct ifcvf_hw *hw)
 {
+       hw->config_cb.callback = NULL;
+       hw->config_cb.private = NULL;
+
        ifcvf_set_status(hw, 0);
        /* flush set_status, make sure VF is stopped, reset */
        ifcvf_get_status(hw);
index e803070..f455441 100644 (file)
@@ -27,6 +27,7 @@
                ((1ULL << VIRTIO_NET_F_MAC)                     | \
                 (1ULL << VIRTIO_F_ANY_LAYOUT)                  | \
                 (1ULL << VIRTIO_F_VERSION_1)                   | \
+                (1ULL << VIRTIO_NET_F_STATUS)                  | \
                 (1ULL << VIRTIO_F_ORDER_PLATFORM)              | \
                 (1ULL << VIRTIO_F_IOMMU_PLATFORM)              | \
                 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
@@ -81,6 +82,9 @@ struct ifcvf_hw {
        void __iomem *net_cfg;
        struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
        void __iomem * const *base;
+       char config_msix_name[256];
+       struct vdpa_callback config_cb;
+
 };
 
 struct ifcvf_adapter {
index abf6a06..f5a60c1 100644 (file)
 #define DRIVER_AUTHOR   "Intel Corporation"
 #define IFCVF_DRIVER_NAME       "ifcvf"
 
+static irqreturn_t ifcvf_config_changed(int irq, void *arg)
+{
+       struct ifcvf_hw *vf = arg;
+
+       if (vf->config_cb.callback)
+               return vf->config_cb.callback(vf->config_cb.private);
+
+       return IRQ_HANDLED;
+}
+
 static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
 {
        struct vring_info *vring = arg;
@@ -28,6 +38,68 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static void ifcvf_free_irq_vectors(void *data)
+{
+       pci_free_irq_vectors(data);
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int i;
+
+
+       for (i = 0; i < queues; i++)
+               devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+
+       ifcvf_free_irq_vectors(pdev);
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct ifcvf_hw *vf = &adapter->vf;
+       int vector, i, ret, irq;
+
+       ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
+                                   IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+       if (ret < 0) {
+               IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
+               return ret;
+       }
+
+       snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
+                pci_name(pdev));
+       vector = 0;
+       irq = pci_irq_vector(pdev, vector);
+       ret = devm_request_irq(&pdev->dev, irq,
+                              ifcvf_config_changed, 0,
+                              vf->config_msix_name, vf);
+
+       for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
+                        pci_name(pdev), i);
+               vector = i + IFCVF_MSI_QUEUE_OFF;
+               irq = pci_irq_vector(pdev, vector);
+               ret = devm_request_irq(&pdev->dev, irq,
+                                      ifcvf_intr_handler, 0,
+                                      vf->vring[i].msix_name,
+                                      &vf->vring[i]);
+               if (ret) {
+                       IFCVF_ERR(pdev,
+                                 "Failed to request irq for vq %d\n", i);
+                       ifcvf_free_irq(adapter, i);
+
+                       return ret;
+               }
+
+               vf->vring[i].irq = irq;
+       }
+
+       return 0;
+}
+
 static int ifcvf_start_datapath(void *private)
 {
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
@@ -118,17 +190,37 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 {
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
+       u8 status_old;
+       int ret;
 
        vf  = vdpa_to_vf(vdpa_dev);
        adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+       status_old = ifcvf_get_status(vf);
 
-       if (status == 0) {
+       if (status_old == status)
+               return;
+
+       if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
+           !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ifcvf_stop_datapath(adapter);
+               ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
+       }
+
+       if (status == 0) {
                ifcvf_reset_vring(adapter);
                return;
        }
 
-       if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+       if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+           !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+               ret = ifcvf_request_irq(adapter);
+               if (ret) {
+                       status = ifcvf_get_status(vf);
+                       status |= VIRTIO_CONFIG_S_FAILED;
+                       ifcvf_set_status(vf, status);
+                       return;
+               }
+
                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa  status %u\n",
@@ -254,7 +346,10 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
 static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
 {
-       /* We don't support config interrupt */
+       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+       vf->config_cb.callback = cb->callback;
+       vf->config_cb.private = cb->private;
 }
 
 /*
@@ -284,38 +379,6 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
 };
 
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
-{
-       struct pci_dev *pdev = adapter->pdev;
-       struct ifcvf_hw *vf = &adapter->vf;
-       int vector, i, ret, irq;
-
-
-       for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
-               snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
-                        pci_name(pdev), i);
-               vector = i + IFCVF_MSI_QUEUE_OFF;
-               irq = pci_irq_vector(pdev, vector);
-               ret = devm_request_irq(&pdev->dev, irq,
-                                      ifcvf_intr_handler, 0,
-                                      vf->vring[i].msix_name,
-                                      &vf->vring[i]);
-               if (ret) {
-                       IFCVF_ERR(pdev,
-                                 "Failed to request irq for vq %d\n", i);
-                       return ret;
-               }
-               vf->vring[i].irq = irq;
-       }
-
-       return 0;
-}
-
-static void ifcvf_free_irq_vectors(void *data)
-{
-       pci_free_irq_vectors(data);
-}
-
 static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct device *dev = &pdev->dev;
@@ -349,13 +412,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return ret;
        }
 
-       ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
-                                   IFCVF_MAX_INTR, PCI_IRQ_MSIX);
-       if (ret < 0) {
-               IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
-               return ret;
-       }
-
        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
@@ -379,12 +435,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;
 
-       ret = ifcvf_request_irq(adapter);
-       if (ret) {
-               IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
-               goto err;
-       }
-
        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
index 01c456f..c7334cc 100644 (file)
@@ -101,7 +101,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 
 static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
 {
-       vq->ready = 0;
+       vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
@@ -131,9 +131,10 @@ static void vdpasim_work(struct work_struct *work)
                                                 vdpasim, work);
        struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
        struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
-       size_t read, write, total_write;
-       int err;
+       ssize_t read, write;
+       size_t total_write;
        int pkts = 0;
+       int err;
 
        spin_lock(&vdpasim->lock);
 
index c4f2737..2c75d16 100644 (file)
@@ -13,15 +13,6 @@ config VHOST_RING
          This option is selected by any driver which needs to access
          the host side of a virtio ring.
 
-config VHOST_DPN
-       bool
-       depends on !ARM || AEABI
-       default y
-       help
-         Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN.
-         This excludes the deprecated ARM ABI since that forces a 4 byte
-         alignment on all structs - incompatible with virtio spec requirements.
-
 config VHOST
        tristate
        select VHOST_IOTLB
@@ -37,7 +28,7 @@ if VHOST_MENU
 
 config VHOST_NET
        tristate "Host kernel accelerator for virtio net"
-       depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN
+       depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
        select VHOST
        ---help---
          This kernel module can be loaded in host kernel to accelerate
@@ -49,7 +40,7 @@ config VHOST_NET
 
 config VHOST_SCSI
        tristate "VHOST_SCSI TCM fabric driver"
-       depends on TARGET_CORE && EVENTFD && VHOST_DPN
+       depends on TARGET_CORE && EVENTFD
        select VHOST
        default n
        ---help---
@@ -58,7 +49,7 @@ config VHOST_SCSI
 
 config VHOST_VSOCK
        tristate "vhost virtio-vsock driver"
-       depends on VSOCKETS && EVENTFD && VHOST_DPN
+       depends on VSOCKETS && EVENTFD
        select VHOST
        select VIRTIO_VSOCKETS_COMMON
        default n
@@ -72,7 +63,7 @@ config VHOST_VSOCK
 
 config VHOST_VDPA
        tristate "Vhost driver for vDPA-based backend"
-       depends on EVENTFD && VHOST_DPN
+       depends on EVENTFD
        select VHOST
        depends on VDPA
        help
index 516519d..e992dec 100644 (file)
@@ -1327,7 +1327,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
        }
        vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
                       UIO_MAXIOV + VHOST_NET_BATCH,
-                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
+                      VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
                       NULL);
 
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
index 8b104f7..6fb4d7e 100644 (file)
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
-                      VHOST_SCSI_WEIGHT, 0, NULL);
+                      VHOST_SCSI_WEIGHT, 0, true, NULL);
 
        vhost_scsi_init_inflight(vs, NULL);
 
index 9a3a090..0466921 100644 (file)
@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
        vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
        vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
-                      VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
+                      VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
 
        f->private_data = n;
 
index 40e2a57..7580e34 100644 (file)
 #include <linux/module.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/mm.h>
 #include <linux/iommu.h>
 #include <linux/uuid.h>
 #include <linux/vdpa.h>
 #include <linux/nospec.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
+#include <linux/kernel.h>
 
 #include "vhost.h"
 
@@ -70,6 +72,7 @@ struct vhost_vdpa {
        int nvqs;
        int virtio_id;
        int minor;
+       struct eventfd_ctx *config_ctx;
 };
 
 static DEFINE_IDA(vhost_vdpa_ida);
@@ -101,6 +104,17 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t vhost_vdpa_config_cb(void *private)
+{
+       struct vhost_vdpa *v = private;
+       struct eventfd_ctx *config_ctx = v->config_ctx;
+
+       if (config_ctx)
+               eventfd_signal(config_ctx, 1);
+
+       return IRQ_HANDLED;
+}
+
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
@@ -288,6 +302,36 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
        return 0;
 }
 
+static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+{
+       if (v->config_ctx)
+               eventfd_ctx_put(v->config_ctx);
+}
+
+static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+{
+       struct vdpa_callback cb;
+       int fd;
+       struct eventfd_ctx *ctx;
+
+       cb.callback = vhost_vdpa_config_cb;
+       cb.private = v->vdpa;
+       if (copy_from_user(&fd, argp, sizeof(fd)))
+               return  -EFAULT;
+
+       ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+       swap(ctx, v->config_ctx);
+
+       if (!IS_ERR_OR_NULL(ctx))
+               eventfd_ctx_put(ctx);
+
+       if (IS_ERR(v->config_ctx))
+               return PTR_ERR(v->config_ctx);
+
+       v->vdpa->config->set_config_cb(v->vdpa, &cb);
+
+       return 0;
+}
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
@@ -395,6 +439,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        case VHOST_SET_LOG_FD:
                r = -ENOIOCTLCMD;
                break;
+       case VHOST_VDPA_SET_CONFIG_CALL:
+               r = vhost_vdpa_set_config_call(v, argp);
+               break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
@@ -694,7 +741,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
                vqs[i] = &v->vqs[i];
                vqs[i]->handle_kick = handle_vq_kick;
        }
-       vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
+       vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);
 
        dev->iotlb = vhost_iotlb_alloc(0, 0);
@@ -729,6 +776,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
+       vhost_vdpa_config_put(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);
@@ -739,12 +787,74 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        return 0;
 }
 
+#ifdef CONFIG_MMU
+static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+{
+       struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       struct vdpa_notification_area notify;
+       struct vm_area_struct *vma = vmf->vma;
+       u16 index = vma->vm_pgoff;
+
+       notify = ops->get_vq_notification(vdpa, index);
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+                           notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+                           vma->vm_page_prot))
+               return VM_FAULT_SIGBUS;
+
+       return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+       .fault = vhost_vdpa_fault,
+};
+
+static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct vhost_vdpa *v = vma->vm_file->private_data;
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       struct vdpa_notification_area notify;
+       int index = vma->vm_pgoff;
+
+       if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+               return -EINVAL;
+       if ((vma->vm_flags & VM_SHARED) == 0)
+               return -EINVAL;
+       if (vma->vm_flags & VM_READ)
+               return -EINVAL;
+       if (index > 65535)
+               return -EINVAL;
+       if (!ops->get_vq_notification)
+               return -ENOTSUPP;
+
+       /* To be safe and easily modelled by userspace, We only
+        * support the doorbell which sits on the page boundary and
+        * does not share the page with other registers.
+        */
+       notify = ops->get_vq_notification(vdpa, index);
+       if (notify.addr & (PAGE_SIZE - 1))
+               return -EINVAL;
+       if (vma->vm_end - vma->vm_start != notify.size)
+               return -ENOTSUPP;
+
+       vma->vm_ops = &vhost_vdpa_vm_ops;
+       return 0;
+}
+#endif /* CONFIG_MMU */
+
 static const struct file_operations vhost_vdpa_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vdpa_open,
        .release        = vhost_vdpa_release,
        .write_iter     = vhost_vdpa_chr_write_iter,
        .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+#ifdef CONFIG_MMU
+       .mmap           = vhost_vdpa_mmap,
+#endif /* CONFIG_MMU */
        .compat_ioctl   = compat_ptr_ioctl,
 };
 
index 421710c..d7b8df3 100644 (file)
@@ -165,11 +165,16 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                             void *key)
 {
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+       struct vhost_work *work = &poll->work;
 
        if (!(key_to_poll(key) & poll->mask))
                return 0;
 
-       vhost_poll_queue(poll);
+       if (!poll->dev->use_worker)
+               work->fn(work);
+       else
+               vhost_poll_queue(poll);
+
        return 0;
 }
 
@@ -450,6 +455,7 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
 void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
+                   bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg))
 {
@@ -467,6 +473,7 @@ void vhost_dev_init(struct vhost_dev *dev,
        dev->iov_limit = iov_limit;
        dev->weight = weight;
        dev->byte_weight = byte_weight;
+       dev->use_worker = use_worker;
        dev->msg_handler = msg_handler;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
@@ -530,6 +537,36 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 
+static void vhost_attach_mm(struct vhost_dev *dev)
+{
+       /* No owner, become one */
+       if (dev->use_worker) {
+               dev->mm = get_task_mm(current);
+       } else {
+               /* vDPA device does not use worker thead, so there's
+                * no need to hold the address space for mm. This help
+                * to avoid deadlock in the case of mmap() which may
+                * held the refcnt of the file and depends on release
+                * method to remove vma.
+                */
+               dev->mm = current->mm;
+               mmgrab(dev->mm);
+       }
+}
+
+static void vhost_detach_mm(struct vhost_dev *dev)
+{
+       if (!dev->mm)
+               return;
+
+       if (dev->use_worker)
+               mmput(dev->mm);
+       else
+               mmdrop(dev->mm);
+
+       dev->mm = NULL;
+}
+
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -542,21 +579,24 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
                goto err_mm;
        }
 
-       /* No owner, become one */
-       dev->mm = get_task_mm(current);
+       vhost_attach_mm(dev);
+
        dev->kcov_handle = kcov_common_handle();
-       worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
-       if (IS_ERR(worker)) {
-               err = PTR_ERR(worker);
-               goto err_worker;
-       }
+       if (dev->use_worker) {
+               worker = kthread_create(vhost_worker, dev,
+                                       "vhost-%d", current->pid);
+               if (IS_ERR(worker)) {
+                       err = PTR_ERR(worker);
+                       goto err_worker;
+               }
 
-       dev->worker = worker;
-       wake_up_process(worker);        /* avoid contributing to loadavg */
+               dev->worker = worker;
+               wake_up_process(worker); /* avoid contributing to loadavg */
 
-       err = vhost_attach_cgroups(dev);
-       if (err)
-               goto err_cgroup;
+               err = vhost_attach_cgroups(dev);
+               if (err)
+                       goto err_cgroup;
+       }
 
        err = vhost_dev_alloc_iovecs(dev);
        if (err)
@@ -564,12 +604,12 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
 
        return 0;
 err_cgroup:
-       kthread_stop(worker);
-       dev->worker = NULL;
+       if (dev->worker) {
+               kthread_stop(dev->worker);
+               dev->worker = NULL;
+       }
 err_worker:
-       if (dev->mm)
-               mmput(dev->mm);
-       dev->mm = NULL;
+       vhost_detach_mm(dev);
        dev->kcov_handle = 0;
 err_mm:
        return err;
@@ -666,9 +706,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
                dev->worker = NULL;
                dev->kcov_handle = 0;
        }
-       if (dev->mm)
-               mmput(dev->mm);
-       dev->mm = NULL;
+       vhost_detach_mm(dev);
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
@@ -878,7 +916,7 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 
 #define vhost_put_user(vq, x, ptr)             \
 ({ \
-       int ret = -EFAULT; \
+       int ret; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
@@ -1240,9 +1278,9 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
 }
 
 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
-                        struct vring_desc __user *desc,
-                        struct vring_avail __user *avail,
-                        struct vring_used __user *used)
+                        vring_desc_t __user *desc,
+                        vring_avail_t __user *avail,
+                        vring_used_t __user *used)
 
 {
        return access_ok(desc, vhost_get_desc_size(vq, num)) &&
@@ -1570,7 +1608,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
                        r = -EFAULT;
                        break;
                }
-               eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+               eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
@@ -1586,7 +1624,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
                        r = -EFAULT;
                        break;
                }
-               ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+               ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
@@ -1598,7 +1636,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
                        r = -EFAULT;
                        break;
                }
-               ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+               ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
@@ -1723,7 +1761,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                r = get_user(fd, (int __user *)argp);
                if (r < 0)
                        break;
-               ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
+               ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
@@ -2296,7 +2334,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                            struct vring_used_elem *heads,
                            unsigned count)
 {
-       struct vring_used_elem __user *used;
+       vring_used_elem_t __user *used;
        u16 old, new;
        int start;
 
index f8403bd..c8e96a0 100644 (file)
@@ -67,9 +67,9 @@ struct vhost_virtqueue {
        /* The actual ring of buffers. */
        struct mutex mutex;
        unsigned int num;
-       struct vring_desc __user *desc;
-       struct vring_avail __user *avail;
-       struct vring_used __user *used;
+       vring_desc_t __user *desc;
+       vring_avail_t __user *avail;
+       vring_used_t __user *used;
        const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
        struct file *kick;
        struct eventfd_ctx *call_ctx;
@@ -154,6 +154,7 @@ struct vhost_dev {
        int weight;
        int byte_weight;
        u64 kcov_handle;
+       bool use_worker;
        int (*msg_handler)(struct vhost_dev *dev,
                           struct vhost_iotlb_msg *msg);
 };
@@ -161,6 +162,7 @@ struct vhost_dev {
 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
                    int nvqs, int iov_limit, int weight, int byte_weight,
+                   bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg));
 long vhost_dev_set_owner(struct vhost_dev *dev);
index ba8e0d6..e059a9a 100644 (file)
@@ -620,9 +620,9 @@ static inline int xfer_to_user(const struct vringh *vrh,
  */
 int vringh_init_user(struct vringh *vrh, u64 features,
                     unsigned int num, bool weak_barriers,
-                    struct vring_desc __user *desc,
-                    struct vring_avail __user *avail,
-                    struct vring_used __user *used)
+                    vring_desc_t __user *desc,
+                    vring_avail_t __user *avail,
+                    vring_used_t __user *used)
 {
        /* Sane power of 2 please! */
        if (!num || num > 0xffff || (num & (num - 1))) {
index fb4e944..a483cec 100644 (file)
@@ -632,7 +632,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 
        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
-                      VHOST_VSOCK_WEIGHT, NULL);
+                      VHOST_VSOCK_WEIGHT, true, NULL);
 
        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
index 69a32df..4c1e146 100644 (file)
@@ -78,6 +78,23 @@ config VIRTIO_BALLOON
 
         If unsure, say M.
 
+config VIRTIO_MEM
+       tristate "Virtio mem driver"
+       default m
+       depends on X86_64
+       depends on VIRTIO
+       depends on MEMORY_HOTPLUG_SPARSE
+       depends on MEMORY_HOTREMOVE
+       select CONTIG_ALLOC
+       help
+        This driver provides access to virtio-mem paravirtualized memory
+        devices, allowing to hotplug and hotunplug memory.
+
+        This driver was only tested under x86-64, but should theoretically
+        work on all architectures that support memory hotplug and hotremove.
+
+        If unsure, say M.
+
 config VIRTIO_INPUT
        tristate "Virtio input driver"
        depends on VIRTIO
index 29a1386..4d99379 100644 (file)
@@ -7,3 +7,4 @@ virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
 obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
 obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
+obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
index 51086a5..1f157d2 100644 (file)
@@ -1107,11 +1107,18 @@ static int virtballoon_restore(struct virtio_device *vdev)
 
 static int virtballoon_validate(struct virtio_device *vdev)
 {
-       /* Tell the host whether we care about poisoned pages. */
+       /*
+        * Inform the hypervisor that our pages are poisoned or
+        * initialized. If we cannot do that then we should disable
+        * page reporting as it could potentially change the contents
+        * of our free pages.
+        */
        if (!want_init_on_free() &&
            (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
             !page_poisoning_enabled()))
                __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
+       else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
+               __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
 
        __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
        return 0;
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
new file mode 100644 (file)
index 0000000..50c689f
--- /dev/null
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio-mem device driver.
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Author(s): David Hildenbrand <david@redhat.com>
+ */
+
+#include <linux/virtio.h>
+#include <linux/virtio_mem.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/memory_hotplug.h>
+#include <linux/memory.h>
+#include <linux/hrtimer.h>
+#include <linux/crash_dump.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/lockdep.h>
+
+#include <acpi/acpi_numa.h>
+
+static bool unplug_online = true;
+module_param(unplug_online, bool, 0644);
+MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
+
+enum virtio_mem_mb_state {
+       /* Unplugged, not added to Linux. Can be reused later. */
+       VIRTIO_MEM_MB_STATE_UNUSED = 0,
+       /* (Partially) plugged, not added to Linux. Error on add_memory(). */
+       VIRTIO_MEM_MB_STATE_PLUGGED,
+       /* Fully plugged, fully added to Linux, offline. */
+       VIRTIO_MEM_MB_STATE_OFFLINE,
+       /* Partially plugged, fully added to Linux, offline. */
+       VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
+       /* Fully plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+       VIRTIO_MEM_MB_STATE_ONLINE,
+       /* Partially plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+       VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
+       /*
+        * Fully plugged, fully added to Linux, online (ZONE_MOVABLE).
+        * We are not allowed to allocate (unplug) parts of this block that
+        * are not movable (similar to gigantic pages). We will never allow
+        * to online OFFLINE_PARTIAL to ZONE_MOVABLE (as they would contain
+        * unmovable parts).
+        */
+       VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE,
+       VIRTIO_MEM_MB_STATE_COUNT
+};
+
+struct virtio_mem {
+       struct virtio_device *vdev;
+
+       /* We might first have to unplug all memory when starting up. */
+       bool unplug_all_required;
+
+       /* Workqueue that processes the plug/unplug requests. */
+       struct work_struct wq;
+       atomic_t config_changed;
+
+       /* Virtqueue for guest->host requests. */
+       struct virtqueue *vq;
+
+       /* Wait for a host response to a guest request. */
+       wait_queue_head_t host_resp;
+
+       /* Space for one guest request and the host response. */
+       struct virtio_mem_req req;
+       struct virtio_mem_resp resp;
+
+       /* The current size of the device. */
+       uint64_t plugged_size;
+       /* The requested size of the device. */
+       uint64_t requested_size;
+
+       /* The device block size (for communicating with the device). */
+       uint64_t device_block_size;
+       /* The translated node id. NUMA_NO_NODE in case not specified. */
+       int nid;
+       /* Physical start address of the memory region. */
+       uint64_t addr;
+       /* Maximum region size in bytes. */
+       uint64_t region_size;
+
+       /* The subblock size. */
+       uint64_t subblock_size;
+       /* The number of subblocks per memory block. */
+       uint32_t nb_sb_per_mb;
+
+       /* Id of the first memory block of this device. */
+       unsigned long first_mb_id;
+       /* Id of the last memory block of this device. */
+       unsigned long last_mb_id;
+       /* Id of the last usable memory block of this device. */
+       unsigned long last_usable_mb_id;
+       /* Id of the next memory block to prepare when needed. */
+       unsigned long next_mb_id;
+
+       /* The parent resource for all memory added via this device. */
+       struct resource *parent_resource;
+
+       /* Summary of all memory block states. */
+       unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
+#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD                10
+
+       /*
+        * One byte state per memory block.
+        *
+        * Allocated via vmalloc(). When preparing new blocks, resized
+        * (alloc+copy+free) when needed - i.e., when the state array for
+        * the next memory block would cross a page boundary.
+        *
+        * With 128MB memory blocks, we have states for 512GB of memory in one
+        * page.
+        */
+       uint8_t *mb_state;
+
+       /*
+        * $nb_sb_per_mb bit per memory block. Handled similar to mb_state.
+        *
+        * With 4MB subblocks, we manage 128GB of memory in one page.
+        */
+       unsigned long *sb_bitmap;
+
+       /*
+        * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
+        *
+        * When this lock is held the pointers can't change, ONLINE and
+        * OFFLINE blocks can't change the state and no subblocks will get
+        * plugged/unplugged.
+        */
+       struct mutex hotplug_mutex;
+       bool hotplug_active;
+
+       /* An error occurred we cannot handle - stop processing requests. */
+       bool broken;
+
+       /* The driver is being removed. */
+       spinlock_t removal_lock;
+       bool removing;
+
+       /* Timer for retrying to plug/unplug memory. */
+       struct hrtimer retry_timer;
+       unsigned int retry_timer_ms;
+#define VIRTIO_MEM_RETRY_TIMER_MIN_MS          50000
+#define VIRTIO_MEM_RETRY_TIMER_MAX_MS          300000
+
+       /* Memory notifier (online/offline events). */
+       struct notifier_block memory_notifier;
+
+       /* Next device in the list of virtio-mem devices. */
+       struct list_head next;
+};
+
+/*
+ * We have to share a single online_page callback among all virtio-mem
+ * devices. We use RCU to iterate the list in the callback.
+ */
+static DEFINE_MUTEX(virtio_mem_mutex);
+static LIST_HEAD(virtio_mem_devices);
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
+
+/*
+ * Register a virtio-mem device so it will be considered for the online_page
+ * callback.
+ */
+static int register_virtio_mem_device(struct virtio_mem *vm)
+{
+       int rc = 0;
+
+       /* First device registers the callback. */
+       mutex_lock(&virtio_mem_mutex);
+       if (list_empty(&virtio_mem_devices))
+               rc = set_online_page_callback(&virtio_mem_online_page_cb);
+       if (!rc)
+               list_add_rcu(&vm->next, &virtio_mem_devices);
+       mutex_unlock(&virtio_mem_mutex);
+
+       return rc;
+}
+
+/*
+ * Unregister a virtio-mem device so it will no longer be considered for the
+ * online_page callback.
+ */
+static void unregister_virtio_mem_device(struct virtio_mem *vm)
+{
+       /* Last device unregisters the callback. */
+       mutex_lock(&virtio_mem_mutex);
+       list_del_rcu(&vm->next);
+       if (list_empty(&virtio_mem_devices))
+               restore_online_page_callback(&virtio_mem_online_page_cb);
+       mutex_unlock(&virtio_mem_mutex);
+
+       synchronize_rcu();
+}
+
+/*
+ * Calculate the memory block id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
+{
+       return addr / memory_block_size_bytes();
+}
+
+/*
+ * Calculate the physical start address of a given memory block id.
+ */
+static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
+{
+       return mb_id * memory_block_size_bytes();
+}
+
+/*
+ * Calculate the subblock id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
+                                             unsigned long addr)
+{
+       const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+       const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
+
+       return (addr - mb_addr) / vm->subblock_size;
+}
+
+/*
+ * Set the state of a memory block, taking care of the state counter.
+ */
+static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
+                                   enum virtio_mem_mb_state state)
+{
+       const unsigned long idx = mb_id - vm->first_mb_id;
+       enum virtio_mem_mb_state old_state;
+
+       old_state = vm->mb_state[idx];
+       vm->mb_state[idx] = state;
+
+       BUG_ON(vm->nb_mb_state[old_state] == 0);
+       vm->nb_mb_state[old_state]--;
+       vm->nb_mb_state[state]++;
+}
+
+/*
+ * Get the state of a memory block.
+ */
+static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
+                                                       unsigned long mb_id)
+{
+       const unsigned long idx = mb_id - vm->first_mb_id;
+
+       return vm->mb_state[idx];
+}
+
+/*
+ * Prepare the state array for the next memory block.
+ */
+static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
+{
+       unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
+       unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
+       int old_pages = PFN_UP(old_bytes);
+       int new_pages = PFN_UP(new_bytes);
+       uint8_t *new_mb_state;
+
+       if (vm->mb_state && old_pages == new_pages)
+               return 0;
+
+       new_mb_state = vzalloc(new_pages * PAGE_SIZE);
+       if (!new_mb_state)
+               return -ENOMEM;
+
+       mutex_lock(&vm->hotplug_mutex);
+       if (vm->mb_state)
+               memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
+       vfree(vm->mb_state);
+       vm->mb_state = new_mb_state;
+       mutex_unlock(&vm->hotplug_mutex);
+
+       return 0;
+}
+
+#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
+       for (_mb_id = _vm->first_mb_id; \
+            _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
+            _mb_id++) \
+               if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+
+#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
+       for (_mb_id = _vm->next_mb_id - 1; \
+            _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
+            _mb_id--) \
+               if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+
+/*
+ * Mark all selected subblocks plugged.
+ *
+ * Will not modify the state of the memory block.
+ */
+static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
+                                        unsigned long mb_id, int sb_id,
+                                        int count)
+{
+       const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+       __bitmap_set(vm->sb_bitmap, bit, count);
+}
+
+/*
+ * Mark all selected subblocks unplugged.
+ *
+ * Will not modify the state of the memory block.
+ */
+static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
+                                          unsigned long mb_id, int sb_id,
+                                          int count)
+{
+       const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+       __bitmap_clear(vm->sb_bitmap, bit, count);
+}
+
+/*
+ * Test if all selected subblocks are plugged.
+ */
+static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
+                                         unsigned long mb_id, int sb_id,
+                                         int count)
+{
+       const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+       if (count == 1)
+               return test_bit(bit, vm->sb_bitmap);
+
+       /* TODO: Helper similar to bitmap_set() */
+       return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
+              bit + count;
+}
+
+/*
+ * Test if all selected subblocks are unplugged.
+ */
+static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
+                                           unsigned long mb_id, int sb_id,
+                                           int count)
+{
+       const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+       /* TODO: Helper similar to bitmap_set() */
+       return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
+}
+
+/*
+ * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
+ * none.
+ */
+static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
+                                           unsigned long mb_id)
+{
+       const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
+
+       return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
+              bit;
+}
+
+/*
+ * Prepare the subblock bitmap for the next memory block.
+ */
+static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
+{
+       const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
+       const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
+       const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
+       int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
+       int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
+       unsigned long *new_sb_bitmap, *old_sb_bitmap;
+
+       if (vm->sb_bitmap && old_pages == new_pages)
+               return 0;
+
+       new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
+       if (!new_sb_bitmap)
+               return -ENOMEM;
+
+       mutex_lock(&vm->hotplug_mutex);
+       if (vm->sb_bitmap)
+               memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
+
+       old_sb_bitmap = vm->sb_bitmap;
+       vm->sb_bitmap = new_sb_bitmap;
+       mutex_unlock(&vm->hotplug_mutex);
+
+       vfree(old_sb_bitmap);
+       return 0;
+}
+
+/*
+ * Try to add a memory block to Linux. This will usually only fail
+ * if out of memory.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
+{
+       const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+       int nid = vm->nid;
+
+       if (nid == NUMA_NO_NODE)
+               nid = memory_add_physaddr_to_nid(addr);
+
+       dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
+       return add_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Try to remove a memory block from Linux. Will only fail if the memory block
+ * is not offline.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
+{
+       const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+       int nid = vm->nid;
+
+       if (nid == NUMA_NO_NODE)
+               nid = memory_add_physaddr_to_nid(addr);
+
+       dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
+       return remove_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Try to offline and remove a memory block from Linux.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
+                                           unsigned long mb_id)
+{
+       const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+       int nid = vm->nid;
+
+       if (nid == NUMA_NO_NODE)
+               nid = memory_add_physaddr_to_nid(addr);
+
+       dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
+               mb_id);
+       return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Trigger the workqueue so the device can perform its magic.
+ */
+static void virtio_mem_retry(struct virtio_mem *vm)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vm->removal_lock, flags);
+       if (!vm->removing)
+               queue_work(system_freezable_wq, &vm->wq);
+       spin_unlock_irqrestore(&vm->removal_lock, flags);
+}
+
+static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
+{
+       int node = NUMA_NO_NODE;
+
+#if defined(CONFIG_ACPI_NUMA)
+       if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
+               node = pxm_to_node(node_id);
+#endif
+       return node;
+}
+
+/*
+ * Test if a virtio-mem device overlaps with the given range. Can be called
+ * from (notifier) callbacks lockless.
+ */
+static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
+                                     unsigned long start, unsigned long size)
+{
+       unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
+       unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
+                               memory_block_size_bytes();
+
+       return start < dev_end && dev_start < start + size;
+}
+
+/*
+ * Test if a virtio-mem device owns a memory block. Can be called from
+ * (notifier) callbacks lockless.
+ */
+static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
+{
+       return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
+}
+
+static int virtio_mem_notify_going_online(struct virtio_mem *vm,
+                                         unsigned long mb_id,
+                                         enum zone_type zone)
+{
+       switch (virtio_mem_mb_get_state(vm, mb_id)) {
+       case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
+               /*
+                * We won't allow to online a partially plugged memory block
+                * to the MOVABLE zone - it would contain unmovable parts.
+                */
+               if (zone == ZONE_MOVABLE) {
+                       dev_warn_ratelimited(&vm->vdev->dev,
+                                            "memory block has holes, MOVABLE not supported\n");
+                       return NOTIFY_BAD;
+               }
+               return NOTIFY_OK;
+       case VIRTIO_MEM_MB_STATE_OFFLINE:
+               return NOTIFY_OK;
+       default:
+               break;
+       }
+       dev_warn_ratelimited(&vm->vdev->dev,
+                            "memory block onlining denied\n");
+       return NOTIFY_BAD;
+}
+
+static void virtio_mem_notify_offline(struct virtio_mem *vm,
+                                     unsigned long mb_id)
+{
+       switch (virtio_mem_mb_get_state(vm, mb_id)) {
+       case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+               break;
+       case VIRTIO_MEM_MB_STATE_ONLINE:
+       case VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE:
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_OFFLINE);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       /*
+        * Trigger the workqueue, maybe we can now unplug memory. Also,
+        * when we offline and remove a memory block, this will re-trigger
+        * us immediately - which is often nice because the removal of
+        * the memory block (e.g., memmap) might have freed up memory
+        * on other memory blocks we manage.
+        */
+       virtio_mem_retry(vm);
+}
+
+static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
+                                    enum zone_type zone)
+{
+       unsigned long nb_offline;
+
+       switch (virtio_mem_mb_get_state(vm, mb_id)) {
+       case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
+               BUG_ON(zone == ZONE_MOVABLE);
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+               break;
+       case VIRTIO_MEM_MB_STATE_OFFLINE:
+               if (zone == ZONE_MOVABLE)
+                       virtio_mem_mb_set_state(vm, mb_id,
+                                           VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE);
+               else
+                       virtio_mem_mb_set_state(vm, mb_id,
+                                               VIRTIO_MEM_MB_STATE_ONLINE);
+               break;
+       default:
+               BUG();
+               break;
+       }
+       nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
+                    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
+
+       /* see if we can add new blocks now that we onlined one block */
+       if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
+               virtio_mem_retry(vm);
+}
+
+static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
+                                           unsigned long mb_id)
+{
+       const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+       struct page *page;
+       unsigned long pfn;
+       int sb_id, i;
+
+       for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+               if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       continue;
+               /*
+                * Drop our reference to the pages so the memory can get
+                * offlined and add the unplugged pages to the managed
+                * page counters (so offlining code can correctly subtract
+                * them again).
+                */
+               pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                              sb_id * vm->subblock_size);
+               adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+               for (i = 0; i < nr_pages; i++) {
+                       page = pfn_to_page(pfn + i);
+                       if (WARN_ON(!page_ref_dec_and_test(page)))
+                               dump_page(page, "unplugged page referenced");
+               }
+       }
+}
+
+static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
+                                            unsigned long mb_id)
+{
+       const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+       unsigned long pfn;
+       int sb_id, i;
+
+       for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+               if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       continue;
+               /*
+                * Get the reference we dropped when going offline and
+                * subtract the unplugged pages from the managed page
+                * counters.
+                */
+               pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                              sb_id * vm->subblock_size);
+               adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+               for (i = 0; i < nr_pages; i++)
+                       page_ref_inc(pfn_to_page(pfn + i));
+       }
+}
+
+/*
+ * This callback will either be called synchronously from add_memory() or
+ * asynchronously (e.g., triggered via user space). We have to be careful
+ * with locking when calling add_memory().
+ */
+static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
+                                        unsigned long action, void *arg)
+{
+       struct virtio_mem *vm = container_of(nb, struct virtio_mem,
+                                            memory_notifier);
+       struct memory_notify *mhp = arg;
+       const unsigned long start = PFN_PHYS(mhp->start_pfn);
+       const unsigned long size = PFN_PHYS(mhp->nr_pages);
+       const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
+       enum zone_type zone;
+       int rc = NOTIFY_OK;
+
+       if (!virtio_mem_overlaps_range(vm, start, size))
+               return NOTIFY_DONE;
+
+       /*
+        * Memory is onlined/offlined in memory block granularity. We cannot
+        * cross virtio-mem device boundaries and memory block boundaries. Bail
+        * out if this ever changes.
+        */
+       if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
+                        !IS_ALIGNED(start, memory_block_size_bytes())))
+               return NOTIFY_BAD;
+
+       /*
+        * Avoid circular locking lockdep warnings. We lock the mutex
+        * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
+        * blocking_notifier_call_chain() has its own lock, which gets unlocked
+        * between both notifier calls and will bail out. False positive.
+        */
+       lockdep_off();
+
+       switch (action) {
+       case MEM_GOING_OFFLINE:
+               mutex_lock(&vm->hotplug_mutex);
+               if (vm->removing) {
+                       rc = notifier_from_errno(-EBUSY);
+                       mutex_unlock(&vm->hotplug_mutex);
+                       break;
+               }
+               vm->hotplug_active = true;
+               virtio_mem_notify_going_offline(vm, mb_id);
+               break;
+       case MEM_GOING_ONLINE:
+               mutex_lock(&vm->hotplug_mutex);
+               if (vm->removing) {
+                       rc = notifier_from_errno(-EBUSY);
+                       mutex_unlock(&vm->hotplug_mutex);
+                       break;
+               }
+               vm->hotplug_active = true;
+               zone = page_zonenum(pfn_to_page(mhp->start_pfn));
+               rc = virtio_mem_notify_going_online(vm, mb_id, zone);
+               break;
+       case MEM_OFFLINE:
+               virtio_mem_notify_offline(vm, mb_id);
+               vm->hotplug_active = false;
+               mutex_unlock(&vm->hotplug_mutex);
+               break;
+       case MEM_ONLINE:
+               zone = page_zonenum(pfn_to_page(mhp->start_pfn));
+               virtio_mem_notify_online(vm, mb_id, zone);
+               vm->hotplug_active = false;
+               mutex_unlock(&vm->hotplug_mutex);
+               break;
+       case MEM_CANCEL_OFFLINE:
+               if (!vm->hotplug_active)
+                       break;
+               virtio_mem_notify_cancel_offline(vm, mb_id);
+               vm->hotplug_active = false;
+               mutex_unlock(&vm->hotplug_mutex);
+               break;
+       case MEM_CANCEL_ONLINE:
+               if (!vm->hotplug_active)
+                       break;
+               vm->hotplug_active = false;
+               mutex_unlock(&vm->hotplug_mutex);
+               break;
+       default:
+               break;
+       }
+
+       lockdep_on();
+
+       return rc;
+}
+
+/*
+ * Set a range of pages PG_offline. Remember pages that were never onlined
+ * (via generic_online_page()) using PageDirty().
+ */
+static void virtio_mem_set_fake_offline(unsigned long pfn,
+                                       unsigned int nr_pages, bool onlined)
+{
+       for (; nr_pages--; pfn++) {
+               struct page *page = pfn_to_page(pfn);
+
+               __SetPageOffline(page);
+               if (!onlined) {
+                       SetPageDirty(page);
+                       /* FIXME: remove after cleanups */
+                       ClearPageReserved(page);
+               }
+       }
+}
+
+/*
+ * Clear PG_offline from a range of pages. If the pages were never onlined,
+ * (via generic_online_page()), clear PageDirty().
+ */
+static void virtio_mem_clear_fake_offline(unsigned long pfn,
+                                         unsigned int nr_pages, bool onlined)
+{
+       for (; nr_pages--; pfn++) {
+               struct page *page = pfn_to_page(pfn);
+
+               __ClearPageOffline(page);
+               if (!onlined)
+                       ClearPageDirty(page);
+       }
+}
+
+/*
+ * Release a range of fake-offline pages to the buddy, effectively
+ * fake-onlining them.
+ */
+static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
+{
+       const int order = MAX_ORDER - 1;
+       int i;
+
+       /*
+        * We are always called with subblock granularity, which is at least
+        * aligned to MAX_ORDER - 1.
+        */
+       for (i = 0; i < nr_pages; i += 1 << order) {
+               struct page *page = pfn_to_page(pfn + i);
+
+               /*
+                * If the page is PageDirty(), it was kept fake-offline when
+                * onlining the memory block. Otherwise, it was allocated
+                * using alloc_contig_range(). All pages in a subblock are
+                * alike.
+                */
+               if (PageDirty(page)) {
+                       virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+                                                     false);
+                       generic_online_page(page, order);
+               } else {
+                       virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+                                                     true);
+                       free_contig_range(pfn + i, 1 << order);
+                       adjust_managed_page_count(page, 1 << order);
+               }
+       }
+}
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+{
+       const unsigned long addr = page_to_phys(page);
+       const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+       struct virtio_mem *vm;
+       int sb_id;
+
+       /*
+        * We exploit here that subblocks have at least MAX_ORDER - 1
+        * size/alignment and that this callback is called with such a
+        * size/alignment. So we cannot cross subblocks and therefore
+        * also not memory blocks.
+        */
+       rcu_read_lock();
+       list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
+               if (!virtio_mem_owned_mb(vm, mb_id))
+                       continue;
+
+               sb_id = virtio_mem_phys_to_sb_id(vm, addr);
+               /*
+                * If plugged, online the pages, otherwise, set them fake
+                * offline (PageOffline).
+                */
+               if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       generic_online_page(page, order);
+               else
+                       virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
+                                                   false);
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       /* not virtio-mem memory, but e.g., a DIMM. online it */
+       generic_online_page(page, order);
+}
+
+static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
+                                       const struct virtio_mem_req *req)
+{
+       struct scatterlist *sgs[2], sg_req, sg_resp;
+       unsigned int len;
+       int rc;
+
+       /* don't use the request residing on the stack (vaddr) */
+       vm->req = *req;
+
+       /* out: buffer for request */
+       sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
+       sgs[0] = &sg_req;
+
+       /* in: buffer for response */
+       sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
+       sgs[1] = &sg_resp;
+
+       rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
+       if (rc < 0)
+               return rc;
+
+       virtqueue_kick(vm->vq);
+
+       /* wait for a response */
+       wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
+
+       return virtio16_to_cpu(vm->vdev, vm->resp.type);
+}
+
+static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
+                                       uint64_t size)
+{
+       const uint64_t nb_vm_blocks = size / vm->device_block_size;
+       const struct virtio_mem_req req = {
+               .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
+               .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
+               .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+       };
+
+       if (atomic_read(&vm->config_changed))
+               return -EAGAIN;
+
+       switch (virtio_mem_send_request(vm, &req)) {
+       case VIRTIO_MEM_RESP_ACK:
+               vm->plugged_size += size;
+               return 0;
+       case VIRTIO_MEM_RESP_NACK:
+               return -EAGAIN;
+       case VIRTIO_MEM_RESP_BUSY:
+               return -ETXTBSY;
+       case VIRTIO_MEM_RESP_ERROR:
+               return -EINVAL;
+       default:
+               return -ENOMEM;
+       }
+}
+
+/*
+ * Ask the device to unplug "size" bytes (a multiple of the device block
+ * size) starting at the given physical address.
+ *
+ * On success, vm->plugged_size is decreased accordingly. Returns 0 on
+ * success or a negative errno (-EAGAIN when a config update is pending,
+ * -ETXTBSY when the device is busy, -EINVAL on error responses, -ENOMEM
+ * otherwise).
+ */
+static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
+                                         uint64_t size)
+{
+       const uint64_t nb_vm_blocks = size / vm->device_block_size;
+       const struct virtio_mem_req req = {
+               .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
+               .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
+               .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+       };
+       int resp;
+
+       /* Process a pending config change (e.g., resized region) first. */
+       if (atomic_read(&vm->config_changed))
+               return -EAGAIN;
+
+       resp = virtio_mem_send_request(vm, &req);
+       if (resp == VIRTIO_MEM_RESP_ACK) {
+               vm->plugged_size -= size;
+               return 0;
+       }
+       if (resp == VIRTIO_MEM_RESP_BUSY)
+               return -ETXTBSY;
+       if (resp == VIRTIO_MEM_RESP_ERROR)
+               return -EINVAL;
+       return -ENOMEM;
+}
+
+/*
+ * Ask the device to unplug all plugged memory, resetting the device-side
+ * memory state.
+ *
+ * On success, clears vm->unplug_all_required and vm->plugged_size and
+ * schedules a config refresh. Returns 0 on success, -ETXTBSY when the
+ * device is busy, -ENOMEM otherwise.
+ */
+static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
+{
+       const struct virtio_mem_req req = {
+               .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
+       };
+       int resp = virtio_mem_send_request(vm, &req);
+
+       if (resp == VIRTIO_MEM_RESP_ACK) {
+               vm->unplug_all_required = false;
+               vm->plugged_size = 0;
+               /* usable region might have shrunk */
+               atomic_set(&vm->config_changed, 1);
+               return 0;
+       }
+       if (resp == VIRTIO_MEM_RESP_BUSY)
+               return -ETXTBSY;
+       return -ENOMEM;
+}
+
+/*
+ * Plug "count" consecutive subblocks of the given memory block, starting
+ * at subblock "sb_id". Updates the plugged state (subblock bitmap), but
+ * not the state of the memory block.
+ */
+static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
+                                int sb_id, int count)
+{
+       const uint64_t size = count * vm->subblock_size;
+       const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
+                             sb_id * vm->subblock_size;
+       int ret;
+
+       dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
+               sb_id, sb_id + count - 1);
+
+       ret = virtio_mem_send_plug_request(vm, addr, size);
+       if (ret)
+               return ret;
+
+       /* Only mark the subblocks plugged once the device ACKed. */
+       virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
+       return 0;
+}
+
+/*
+ * Unplug "count" consecutive subblocks of the given memory block, starting
+ * at subblock "sb_id". Updates the plugged state (subblock bitmap), but
+ * not the state of the memory block.
+ */
+static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
+                                  int sb_id, int count)
+{
+       const uint64_t size = count * vm->subblock_size;
+       const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
+                             sb_id * vm->subblock_size;
+       int ret;
+
+       dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
+               mb_id, sb_id, sb_id + count - 1);
+
+       ret = virtio_mem_send_unplug_request(vm, addr, size);
+       if (ret)
+               return ret;
+
+       /* Only mark the subblocks unplugged once the device ACKed. */
+       virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
+       return 0;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an offline or not-added
+ * memory block. Will fail if any subblock cannot get unplugged (instead of
+ * skipping it).
+ *
+ * *nb_sb is decremented by the number of subblocks actually unplugged, so
+ * callers can spread one request across multiple memory blocks.
+ *
+ * Will not modify the state of the memory block.
+ *
+ * Note: can fail after some subblocks were unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
+                                      unsigned long mb_id, uint64_t *nb_sb)
+{
+       int sb_id, count;
+       int rc;
+
+       /* Scan from the highest subblock id downwards. */
+       sb_id = vm->nb_sb_per_mb - 1;
+       while (*nb_sb) {
+               /* Find the next candidate subblock */
+               while (sb_id >= 0 &&
+                      virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
+                       sb_id--;
+               if (sb_id < 0)
+                       break;
+               /* Try to unplug multiple subblocks at a time */
+               count = 1;
+               while (count < *nb_sb && sb_id > 0 &&
+                      virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
+                       count++;
+                       sb_id--;
+               }
+
+               rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+               if (rc)
+                       return rc;
+               *nb_sb -= count;
+               sb_id--;
+       }
+
+       return 0;
+}
+
+/*
+ * Unplug all plugged subblocks of an offline or not-added memory block.
+ *
+ * Will not modify the state of the memory block.
+ *
+ * Note: can fail after some subblocks were unplugged.
+ */
+static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
+{
+       /* Request all subblocks of this memory block to be unplugged. */
+       uint64_t remaining = vm->nb_sb_per_mb;
+
+       return virtio_mem_mb_unplug_any_sb(vm, mb_id, &remaining);
+}
+
+/*
+ * Prepare tracking data for the next memory block and hand out its id
+ * via *mb_id.
+ *
+ * Returns 0 on success, -ENOSPC when the usable region is exhausted, or
+ * a negative errno when growing the tracking data fails.
+ */
+static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
+                                     unsigned long *mb_id)
+{
+       int ret;
+
+       /* No usable memory blocks left? */
+       if (vm->next_mb_id > vm->last_usable_mb_id)
+               return -ENOSPC;
+
+       /* Grow the state array and the subblock bitmap if required. */
+       ret = virtio_mem_mb_state_prepare_next_mb(vm);
+       if (!ret)
+               ret = virtio_mem_sb_bitmap_prepare_next_mb(vm);
+       if (ret)
+               return ret;
+
+       *mb_id = vm->next_mb_id++;
+       vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
+       return 0;
+}
+
+/*
+ * Don't add too many blocks that are not onlined yet to avoid running OOM.
+ *
+ * Returns true when the number of offline (fully or partially plugged)
+ * memory blocks reached VIRTIO_MEM_NB_OFFLINE_THRESHOLD.
+ */
+static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
+{
+       return vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
+              vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] >=
+              VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
+}
+
+/*
+ * Try to plug the desired number of subblocks and add the memory block
+ * to Linux.
+ *
+ * Plugs at most *nb_sb subblocks (capped at one full memory block) and
+ * decrements *nb_sb by the number actually plugged on success.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
+                                     unsigned long mb_id,
+                                     uint64_t *nb_sb)
+{
+       const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
+       int rc, rc2;
+
+       if (WARN_ON_ONCE(!count))
+               return -EINVAL;
+
+       /*
+        * Plug the requested number of subblocks before adding it to linux,
+        * so that onlining will directly online all plugged subblocks.
+        */
+       rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
+       if (rc)
+               return rc;
+
+       /*
+        * Mark the block properly offline before adding it to Linux,
+        * so the memory notifiers will find the block in the right state.
+        */
+       if (count == vm->nb_sb_per_mb)
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_OFFLINE);
+       else
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+
+       /* Add the memory block to linux - if that fails, try to unplug. */
+       rc = virtio_mem_mb_add(vm, mb_id);
+       if (rc) {
+               /* If the rollback-unplug succeeds, the block is unused. */
+               enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;
+
+               dev_err(&vm->vdev->dev,
+                       "adding memory block %lu failed with %d\n", mb_id, rc);
+               rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
+
+               /*
+                * TODO: Linux MM does not properly clean up yet in all cases
+                * where adding of memory failed - especially on -ENOMEM.
+                */
+               if (rc2)
+                       new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
+               virtio_mem_mb_set_state(vm, mb_id, new_state);
+               return rc;
+       }
+
+       *nb_sb -= count;
+       return 0;
+}
+
+/*
+ * Try to plug the desired number of subblocks of a memory block that
+ * is already added to Linux.
+ *
+ * Decrements *nb_sb by the number of subblocks actually plugged. When the
+ * block becomes fully plugged, its state is updated to ONLINE/OFFLINE.
+ *
+ * Will modify the state of the memory block.
+ *
+ * Note: Can fail after some subblocks were successfully plugged.
+ */
+static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
+                                    uint64_t *nb_sb, bool online)
+{
+       unsigned long pfn, nr_pages;
+       int sb_id, count;
+       /*
+        * Initialize rc: if the loop below breaks on its first iteration
+        * (no unplugged subblock found), rc would otherwise be returned
+        * uninitialized.
+        */
+       int rc = 0;
+
+       if (WARN_ON_ONCE(!*nb_sb))
+               return -EINVAL;
+
+       while (*nb_sb) {
+               sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
+               if (sb_id >= vm->nb_sb_per_mb)
+                       break;
+               /* Plug as many consecutive unplugged subblocks as possible. */
+               count = 1;
+               while (count < *nb_sb &&
+                      sb_id + count < vm->nb_sb_per_mb &&
+                      !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
+                                                     1))
+                       count++;
+
+               rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
+               if (rc)
+                       return rc;
+               *nb_sb -= count;
+               if (!online)
+                       continue;
+
+               /* fake-online the pages if the memory block is online */
+               pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                              sb_id * vm->subblock_size);
+               nr_pages = PFN_DOWN(count * vm->subblock_size);
+               virtio_mem_fake_online(pfn, nr_pages);
+       }
+
+       /* Fully plugged now? Then the block is no longer "partial". */
+       if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+               if (online)
+                       virtio_mem_mb_set_state(vm, mb_id,
+                                               VIRTIO_MEM_MB_STATE_ONLINE);
+               else
+                       virtio_mem_mb_set_state(vm, mb_id,
+                                               VIRTIO_MEM_MB_STATE_OFFLINE);
+       }
+
+       return rc;
+}
+
+/*
+ * Try to plug the requested amount of memory.
+ *
+ * Prefers partially plugged blocks that are already added to Linux
+ * (online first, then offline) before adding unused or brand-new memory
+ * blocks. Returns 0 on success or a negative errno.
+ */
+static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+       uint64_t nb_sb = diff / vm->subblock_size;
+       unsigned long mb_id;
+       int rc;
+
+       if (!nb_sb)
+               return 0;
+
+       /* Don't race with onlining/offlining */
+       mutex_lock(&vm->hotplug_mutex);
+
+       /* Try to plug subblocks of partially plugged online blocks. */
+       virtio_mem_for_each_mb_state(vm, mb_id,
+                                    VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
+               rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               cond_resched();
+       }
+
+       /* Try to plug subblocks of partially plugged offline blocks. */
+       virtio_mem_for_each_mb_state(vm, mb_id,
+                                    VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+               rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               cond_resched();
+       }
+
+       /*
+        * We won't be working on online/offline memory blocks from this point,
+        * so we can't race with memory onlining/offlining. Drop the mutex.
+        */
+       mutex_unlock(&vm->hotplug_mutex);
+
+       /* Try to plug and add unused blocks */
+       virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
+               if (virtio_mem_too_many_mb_offline(vm))
+                       return -ENOSPC;
+
+               rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+               if (rc || !nb_sb)
+                       return rc;
+               cond_resched();
+       }
+
+       /* Try to prepare, plug and add new blocks */
+       while (nb_sb) {
+               if (virtio_mem_too_many_mb_offline(vm))
+                       return -ENOSPC;
+
+               rc = virtio_mem_prepare_next_mb(vm, &mb_id);
+               if (rc)
+                       return rc;
+               rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+               if (rc)
+                       return rc;
+               cond_resched();
+       }
+
+       return 0;
+out_unlock:
+       mutex_unlock(&vm->hotplug_mutex);
+       return rc;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an offline memory block.
+ * Will fail if any subblock cannot get unplugged (instead of skipping it).
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ *
+ * Note: Can fail after some subblocks were successfully unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
+                                              unsigned long mb_id,
+                                              uint64_t *nb_sb)
+{
+       int rc;
+
+       rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
+
+       /* some subblocks might have been unplugged even on failure */
+       if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+       if (rc)
+               return rc;
+
+       if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+               /*
+                * Remove the block from Linux - this should never fail.
+                * Hinder the block from getting onlined by marking it
+                * unplugged. Temporarily drop the mutex, so
+                * any pending GOING_ONLINE requests can be serviced/rejected.
+                */
+               virtio_mem_mb_set_state(vm, mb_id,
+                                       VIRTIO_MEM_MB_STATE_UNUSED);
+
+               mutex_unlock(&vm->hotplug_mutex);
+               rc = virtio_mem_mb_remove(vm, mb_id);
+               /* Removing a fully-unplugged offline block must not fail. */
+               BUG_ON(rc);
+               mutex_lock(&vm->hotplug_mutex);
+       }
+       return 0;
+}
+
+/*
+ * Unplug the given plugged subblocks of an online memory block.
+ *
+ * The pages are first allocated via alloc_contig_range() so nobody else
+ * can use them, then handed back to the device. On success, the block is
+ * marked ONLINE_PARTIAL.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
+                                         unsigned long mb_id, int sb_id,
+                                         int count)
+{
+       const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
+       unsigned long start_pfn;
+       int rc;
+
+       start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                            sb_id * vm->subblock_size);
+       /* Allocate the range so it is no longer used by anybody else. */
+       rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
+                               MIGRATE_MOVABLE, GFP_KERNEL);
+       if (rc == -ENOMEM)
+               /* whoops, out of memory */
+               return rc;
+       if (rc)
+               /* any other failure is treated as "temporarily busy" */
+               return -EBUSY;
+
+       /* Mark it as fake-offline before unplugging it */
+       virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
+       adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
+
+       /* Try to unplug the allocated memory */
+       rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+       if (rc) {
+               /* Return the memory to the buddy. */
+               virtio_mem_fake_online(start_pfn, nr_pages);
+               return rc;
+       }
+
+       virtio_mem_mb_set_state(vm, mb_id,
+                               VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+       return 0;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an online memory block.
+ * Will skip subblock that are busy.
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ *
+ * Note: Can fail after some subblocks were successfully unplugged. Can
+ *       return 0 even if subblocks were busy and could not get unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
+                                             unsigned long mb_id,
+                                             uint64_t *nb_sb)
+{
+       int rc, sb_id;
+
+       /* If possible, try to unplug the complete block in one shot. */
+       if (*nb_sb >= vm->nb_sb_per_mb &&
+           virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+               rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
+                                                   vm->nb_sb_per_mb);
+               if (!rc) {
+                       *nb_sb -= vm->nb_sb_per_mb;
+                       goto unplugged;
+               } else if (rc != -EBUSY)
+                       return rc;
+       }
+
+       /* Fallback to single subblocks. */
+       for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
+               /* Find the next candidate subblock */
+               while (sb_id >= 0 &&
+                      !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       sb_id--;
+               if (sb_id < 0)
+                       break;
+
+               rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
+               if (rc == -EBUSY)
+                       /* skip busy subblocks - we may still make progress */
+                       continue;
+               else if (rc)
+                       return rc;
+               *nb_sb -= 1;
+       }
+
+unplugged:
+       /*
+        * Once all subblocks of a memory block were unplugged, offline and
+        * remove it. This will usually not fail, as no memory is in use
+        * anymore - however some other notifiers might NACK the request.
+        */
+       if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+               mutex_unlock(&vm->hotplug_mutex);
+               rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
+               mutex_lock(&vm->hotplug_mutex);
+               if (!rc)
+                       virtio_mem_mb_set_state(vm, mb_id,
+                                               VIRTIO_MEM_MB_STATE_UNUSED);
+       }
+
+       return 0;
+}
+
+/*
+ * Try to unplug the requested amount of memory.
+ *
+ * Prefers offline blocks (partially plugged first), falling back to online
+ * blocks only when unplug_online is set. Returns 0 on success, -EBUSY when
+ * not everything could be unplugged, or another negative errno.
+ */
+static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+       uint64_t nb_sb = diff / vm->subblock_size;
+       unsigned long mb_id;
+       int rc;
+
+       if (!nb_sb)
+               return 0;
+
+       /*
+        * We'll drop the mutex a couple of times when it is safe to do so.
+        * This might result in some blocks switching the state (online/offline)
+        * and we could miss them in this run - we will retry again later.
+        */
+       mutex_lock(&vm->hotplug_mutex);
+
+       /* Try to unplug subblocks of partially plugged offline blocks. */
+       virtio_mem_for_each_mb_state_rev(vm, mb_id,
+                                        VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+               rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
+                                                        &nb_sb);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               cond_resched();
+       }
+
+       /* Try to unplug subblocks of plugged offline blocks. */
+       virtio_mem_for_each_mb_state_rev(vm, mb_id,
+                                        VIRTIO_MEM_MB_STATE_OFFLINE) {
+               rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
+                                                        &nb_sb);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               cond_resched();
+       }
+
+       if (!unplug_online) {
+               mutex_unlock(&vm->hotplug_mutex);
+               return 0;
+       }
+
+       /* Try to unplug subblocks of partially plugged online blocks. */
+       virtio_mem_for_each_mb_state_rev(vm, mb_id,
+                                        VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
+               rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
+                                                       &nb_sb);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               /* Drop the mutex while rescheduling, so others can proceed. */
+               mutex_unlock(&vm->hotplug_mutex);
+               cond_resched();
+               mutex_lock(&vm->hotplug_mutex);
+       }
+
+       /* Try to unplug subblocks of plugged online blocks. */
+       virtio_mem_for_each_mb_state_rev(vm, mb_id,
+                                        VIRTIO_MEM_MB_STATE_ONLINE) {
+               rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
+                                                       &nb_sb);
+               if (rc || !nb_sb)
+                       goto out_unlock;
+               /* Drop the mutex while rescheduling, so others can proceed. */
+               mutex_unlock(&vm->hotplug_mutex);
+               cond_resched();
+               mutex_lock(&vm->hotplug_mutex);
+       }
+
+       mutex_unlock(&vm->hotplug_mutex);
+       return nb_sb ? -EBUSY : 0;
+out_unlock:
+       mutex_unlock(&vm->hotplug_mutex);
+       return rc;
+}
+
+/*
+ * Try to unplug all blocks that couldn't be unplugged before, for example,
+ * because the hypervisor was busy. Stops and returns the error of the
+ * first block that fails to unplug.
+ */
+static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
+{
+       unsigned long mb_id;
+       int ret;
+
+       virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
+               ret = virtio_mem_mb_unplug(vm, mb_id);
+               if (ret)
+                       return ret;
+               /* Fully unplugged - the block can be reused. */
+               virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+       }
+
+       return 0;
+}
+
+/*
+ * Update all parts of the config that could have changed: the plugged
+ * size (sanity-checked against our own accounting), the usable region
+ * (recomputing the last usable memory block id), and the requested size.
+ */
+static void virtio_mem_refresh_config(struct virtio_mem *vm)
+{
+       const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+       uint64_t new_plugged_size, usable_region_size, end_addr;
+
+       /* the plugged_size is just a reflection of what _we_ did previously */
+       virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
+                    &new_plugged_size);
+       if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
+               vm->plugged_size = new_plugged_size;
+
+       /* calculate the last usable memory block id */
+       virtio_cread(vm->vdev, struct virtio_mem_config,
+                    usable_region_size, &usable_region_size);
+       end_addr = vm->addr + usable_region_size;
+       /* never consider memory beyond what is physically addressable */
+       end_addr = min(end_addr, phys_limit);
+       vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
+
+       /* see if there is a request to change the size */
+       virtio_cread(vm->vdev, struct virtio_mem_config, requested_size,
+                    &vm->requested_size);
+
+       dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
+       dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
+}
+
+/*
+ * Workqueue function for handling plug/unplug requests and config updates.
+ *
+ * Resolves the difference between requested_size and plugged_size by
+ * plugging or unplugging memory. Transient errors arm the retry timer;
+ * unknown errors mark the device broken.
+ */
+static void virtio_mem_run_wq(struct work_struct *work)
+{
+       struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
+       uint64_t diff;
+       int rc;
+
+       /* We are about to run - no need for a pending retry. */
+       hrtimer_cancel(&vm->retry_timer);
+
+       if (vm->broken)
+               return;
+
+retry:
+       rc = 0;
+
+       /* Make sure we start with a clean state if there are leftovers. */
+       if (unlikely(vm->unplug_all_required))
+               rc = virtio_mem_send_unplug_all_request(vm);
+
+       if (atomic_read(&vm->config_changed)) {
+               atomic_set(&vm->config_changed, 0);
+               virtio_mem_refresh_config(vm);
+       }
+
+       /* Unplug any leftovers from previous runs */
+       if (!rc)
+               rc = virtio_mem_unplug_pending_mb(vm);
+
+       if (!rc && vm->requested_size != vm->plugged_size) {
+               if (vm->requested_size > vm->plugged_size) {
+                       diff = vm->requested_size - vm->plugged_size;
+                       rc = virtio_mem_plug_request(vm, diff);
+               } else {
+                       diff = vm->plugged_size - vm->requested_size;
+                       rc = virtio_mem_unplug_request(vm, diff);
+               }
+       }
+
+       switch (rc) {
+       case 0:
+               /* Success - reset the retry backoff. */
+               vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+               break;
+       case -ENOSPC:
+               /*
+                * We cannot add any more memory (alignment, physical limit)
+                * or we have too many offline memory blocks.
+                */
+               break;
+       case -ETXTBSY:
+               /*
+                * The hypervisor cannot process our request right now
+                * (e.g., out of memory, migrating);
+                */
+               /* fallthrough */
+       case -EBUSY:
+               /*
+                * We cannot free up any memory to unplug it (all plugged memory
+                * is busy).
+                */
+               /* fallthrough */
+       case -ENOMEM:
+               /* Out of memory, try again later. */
+               hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
+                             HRTIMER_MODE_REL);
+               break;
+       case -EAGAIN:
+               /* Retry immediately (e.g., the config changed). */
+               goto retry;
+       default:
+               /* Unknown error, mark as broken */
+               dev_err(&vm->vdev->dev,
+                       "unknown error, marking device broken: %d\n", rc);
+               vm->broken = true;
+       }
+}
+
+/*
+ * Retry timer callback: trigger another run of the workqueue and double
+ * the backoff interval, capped at VIRTIO_MEM_RETRY_TIMER_MAX_MS.
+ */
+static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
+{
+       struct virtio_mem *vm = container_of(timer, struct virtio_mem,
+                                            retry_timer);
+
+       virtio_mem_retry(vm);
+       vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
+                                  VIRTIO_MEM_RETRY_TIMER_MAX_MS);
+       return HRTIMER_NORESTART;
+}
+
+/*
+ * Virtqueue callback: a response arrived - wake up the request waiter
+ * (see the wait_event() in virtio_mem_send_request()).
+ */
+static void virtio_mem_handle_response(struct virtqueue *vq)
+{
+       struct virtio_mem *vm = vq->vdev->priv;
+
+       wake_up(&vm->host_resp);
+}
+
+/*
+ * Set up the single "guest-request" virtqueue used for sending requests
+ * to the device and receiving its responses.
+ */
+static int virtio_mem_init_vq(struct virtio_mem *vm)
+{
+       struct virtqueue *vq;
+
+       vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
+                                  "guest-request");
+       if (IS_ERR(vq))
+               return PTR_ERR(vq);
+
+       vm->vq = vq;
+       return 0;
+}
+
+/*
+ * Initialize the device state from the (immutable) device config and
+ * derive the subblock/memory-block geometry.
+ *
+ * Returns 0 on success, -EINVAL for unusable setups, -EBUSY in kdump.
+ */
+static int virtio_mem_init(struct virtio_mem *vm)
+{
+       const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+       uint16_t node_id;
+
+       if (!vm->vdev->config->get) {
+               dev_err(&vm->vdev->dev, "config access disabled\n");
+               return -EINVAL;
+       }
+
+       /*
+        * We don't want to (un)plug or reuse any memory when in kdump. The
+        * memory is still accessible (but not mapped).
+        */
+       if (is_kdump_kernel()) {
+               dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
+               return -EBUSY;
+       }
+
+       /* Fetch all properties that can't change. */
+       virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
+                    &vm->plugged_size);
+       virtio_cread(vm->vdev, struct virtio_mem_config, block_size,
+                    &vm->device_block_size);
+       virtio_cread(vm->vdev, struct virtio_mem_config, node_id,
+                    &node_id);
+       vm->nid = virtio_mem_translate_node_id(vm, node_id);
+       virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+       virtio_cread(vm->vdev, struct virtio_mem_config, region_size,
+                    &vm->region_size);
+
+       /*
+        * We always hotplug memory in memory block granularity. This way,
+        * we have to wait for exactly one memory block to online.
+        */
+       if (vm->device_block_size > memory_block_size_bytes()) {
+               dev_err(&vm->vdev->dev,
+                       "The block size is not supported (too big).\n");
+               return -EINVAL;
+       }
+
+       /* bad device setup - warn only */
+       if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
+               dev_warn(&vm->vdev->dev,
+                        "The alignment of the physical start address can make some memory unusable.\n");
+       if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
+               dev_warn(&vm->vdev->dev,
+                        "The alignment of the physical end address can make some memory unusable.\n");
+       if (vm->addr + vm->region_size > phys_limit)
+               dev_warn(&vm->vdev->dev,
+                        "Some memory is not addressable. This can make some memory unusable.\n");
+
+       /*
+        * Calculate the subblock size:
+        * - At least MAX_ORDER - 1 / pageblock_order.
+        * - At least the device block size.
+        * In the worst case, a single subblock per memory block.
+        */
+       vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
+                                                    pageblock_order);
+       vm->subblock_size = max_t(uint64_t, vm->device_block_size,
+                                 vm->subblock_size);
+       vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
+
+       /* Round up to the next full memory block */
+       vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
+                                                  memory_block_size_bytes());
+       vm->next_mb_id = vm->first_mb_id;
+       vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
+                        vm->region_size) - 1;
+
+       dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
+       dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
+       dev_info(&vm->vdev->dev, "device block size: 0x%llx",
+                (unsigned long long)vm->device_block_size);
+       dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
+                memory_block_size_bytes());
+       dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
+                (unsigned long long)vm->subblock_size);
+       if (vm->nid != NUMA_NO_NODE)
+               dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
+
+       return 0;
+}
+
+/*
+ * Reserve the whole device-managed physical region as (not busy) System
+ * RAM, so we own it and add_memory() can later add parts of it.
+ */
+static int virtio_mem_create_resource(struct virtio_mem *vm)
+{
+       /*
+        * When force-unloading the driver and removing the device, we
+        * could have a garbage pointer. Duplicate the string.
+        */
+       const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
+       struct resource *res;
+
+       if (!name)
+               return -ENOMEM;
+
+       res = __request_mem_region(vm->addr, vm->region_size, name,
+                                  IORESOURCE_SYSTEM_RAM);
+       vm->parent_resource = res;
+       if (!res) {
+               kfree(name);
+               dev_warn(&vm->vdev->dev, "could not reserve device region\n");
+               dev_info(&vm->vdev->dev,
+                        "reloading the driver is not supported\n");
+               return -EBUSY;
+       }
+
+       /* The memory is not actually busy - make add_memory() work. */
+       res->flags &= ~IORESOURCE_BUSY;
+       return 0;
+}
+
+/*
+ * Release and free the parent resource, including the duplicated name
+ * string allocated in virtio_mem_create_resource().
+ */
+static void virtio_mem_delete_resource(struct virtio_mem *vm)
+{
+       struct resource *res = vm->parent_resource;
+       const char *name;
+
+       if (!res)
+               return;
+
+       vm->parent_resource = NULL;
+       name = res->name;
+       release_resource(res);
+       kfree(res);
+       kfree(name);
+}
+
+/*
+ * Probe: allocate and initialize device state, reserve the device region,
+ * register notifiers and kick off the first workqueue run.
+ */
+static int virtio_mem_probe(struct virtio_device *vdev)
+{
+       struct virtio_mem *vm;
+       int rc;
+
+       /* Sanity-check the wire format sizes of request/response. */
+       BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
+       BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
+
+       vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+       if (!vm)
+               return -ENOMEM;
+
+       init_waitqueue_head(&vm->host_resp);
+       vm->vdev = vdev;
+       INIT_WORK(&vm->wq, virtio_mem_run_wq);
+       mutex_init(&vm->hotplug_mutex);
+       INIT_LIST_HEAD(&vm->next);
+       spin_lock_init(&vm->removal_lock);
+       hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       vm->retry_timer.function = virtio_mem_timer_expired;
+       vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+
+       /* register the virtqueue */
+       rc = virtio_mem_init_vq(vm);
+       if (rc)
+               goto out_free_vm;
+
+       /* initialize the device by querying the config */
+       rc = virtio_mem_init(vm);
+       if (rc)
+               goto out_del_vq;
+
+       /* create the parent resource for all memory */
+       rc = virtio_mem_create_resource(vm);
+       if (rc)
+               goto out_del_vq;
+
+       /*
+        * If we still have memory plugged, we have to unplug all memory first.
+        * Registering our parent resource makes sure that this memory isn't
+        * actually in use (e.g., trying to reload the driver).
+        */
+       if (vm->plugged_size) {
+               vm->unplug_all_required = 1;
+               dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
+       }
+
+       /* register callbacks */
+       vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
+       rc = register_memory_notifier(&vm->memory_notifier);
+       if (rc)
+               goto out_del_resource;
+       rc = register_virtio_mem_device(vm);
+       if (rc)
+               goto out_unreg_mem;
+
+       virtio_device_ready(vdev);
+
+       /* trigger a config update to start processing the requested_size */
+       atomic_set(&vm->config_changed, 1);
+       queue_work(system_freezable_wq, &vm->wq);
+
+       return 0;
+out_unreg_mem:
+       /* Unwind in reverse order of registration. */
+       unregister_memory_notifier(&vm->memory_notifier);
+out_del_resource:
+       virtio_mem_delete_resource(vm);
+out_del_vq:
+       vdev->config->del_vqs(vdev);
+out_free_vm:
+       kfree(vm);
+       vdev->priv = NULL;
+
+       return rc;
+}
+
+static void virtio_mem_remove(struct virtio_device *vdev)
+{
+       struct virtio_mem *vm = vdev->priv;
+       unsigned long mb_id;
+       int rc;
+
+       /*
+        * Make sure the workqueue won't be triggered anymore and no memory
+        * blocks can be onlined/offlined until we're finished here.
+        */
+       mutex_lock(&vm->hotplug_mutex);
+       spin_lock_irq(&vm->removal_lock);
+       vm->removing = true;
+       spin_unlock_irq(&vm->removal_lock);
+       mutex_unlock(&vm->hotplug_mutex);
+
+       /* wait until the workqueue stopped */
+       cancel_work_sync(&vm->wq);
+       hrtimer_cancel(&vm->retry_timer);
+
+       /*
+        * After we unregistered our callbacks, user space can online partially
+        * plugged offline blocks. Make sure to remove them.
+        */
+       virtio_mem_for_each_mb_state(vm, mb_id,
+                                    VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+               rc = virtio_mem_mb_remove(vm, mb_id);
+               BUG_ON(rc);
+               virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+       }
+       /*
+        * After we unregistered our callbacks, user space can no longer
+        * offline partially plugged online memory blocks. No need to worry
+        * about them.
+        */
+
+       /* unregister callbacks */
+       unregister_virtio_mem_device(vm);
+       unregister_memory_notifier(&vm->memory_notifier);
+
+       /*
+        * There is no way we could reliably remove all memory we have added to
+        * the system. And there is no way to stop the driver/device from going
+        * away. Warn at least.
+        */
+       if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+               dev_warn(&vdev->dev, "device still has system memory added\n");
+       else
+               virtio_mem_delete_resource(vm);
+
+       /* remove all tracking data - no locking needed */
+       vfree(vm->mb_state);
+       vfree(vm->sb_bitmap);
+
+       /* reset the device and cleanup the queues */
+       vdev->config->reset(vdev);
+       vdev->config->del_vqs(vdev);
+
+       kfree(vm);
+       vdev->priv = NULL;
+}
+
+static void virtio_mem_config_changed(struct virtio_device *vdev)
+{
+       struct virtio_mem *vm = vdev->priv;
+
+       atomic_set(&vm->config_changed, 1);
+       virtio_mem_retry(vm);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mem_freeze(struct virtio_device *vdev)
+{
+       /*
+        * When restarting the VM, all memory is usually unplugged. Don't
+        * allow to suspend/hibernate.
+        */
+       dev_err(&vdev->dev, "save/restore not supported.\n");
+       return -EPERM;
+}
+
+static int virtio_mem_restore(struct virtio_device *vdev)
+{
+       return -EPERM;
+}
+#endif
+
+static unsigned int virtio_mem_features[] = {
+#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
+       VIRTIO_MEM_F_ACPI_PXM,
+#endif
+};
+
+static struct virtio_device_id virtio_mem_id_table[] = {
+       { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
+       { 0 },
+};
+
+static struct virtio_driver virtio_mem_driver = {
+       .feature_table = virtio_mem_features,
+       .feature_table_size = ARRAY_SIZE(virtio_mem_features),
+       .driver.name = KBUILD_MODNAME,
+       .driver.owner = THIS_MODULE,
+       .id_table = virtio_mem_id_table,
+       .probe = virtio_mem_probe,
+       .remove = virtio_mem_remove,
+       .config_changed = virtio_mem_config_changed,
+#ifdef CONFIG_PM_SLEEP
+       .freeze =       virtio_mem_freeze,
+       .restore =      virtio_mem_restore,
+#endif
+};
+
+module_virtio_driver(virtio_mem_driver);
+MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
+MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
+MODULE_DESCRIPTION("Virtio-mem driver");
+MODULE_LICENSE("GPL");
index 97d5725..9d16aaf 100644 (file)
@@ -466,10 +466,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        int irq = platform_get_irq(vm_dev->pdev, 0);
        int i, err, queue_idx = 0;
 
-       if (irq < 0) {
-               dev_err(&vdev->dev, "Cannot get IRQ resource\n");
+       if (irq < 0)
                return irq;
-       }
 
        err = request_irq(irq, vm_interrupt, IRQF_SHARED,
                        dev_name(&vdev->dev), vm_dev);
index 7abcc50..db93ced 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/delay.h>
 #define VIRTIO_PCI_NO_LEGACY
+#define VIRTIO_RING_NO_LEGACY
 #include "virtio_pci_common.h"
 
 /*
index 0968859..108edbc 100644 (file)
@@ -64,7 +64,7 @@ static void xen_get_runstate_snapshot_cpu_delta(
        do {
                state_time = get64(&state->state_entry_time);
                rmb();  /* Hypervisor might update data. */
-               *res = READ_ONCE(*state);
+               *res = __READ_ONCE(*state);
                rmb();  /* Hypervisor might update data. */
        } while (get64(&state->state_entry_time) != state_time ||
                 (state_time & XEN_RUNSTATE_UPDATE));
index e5d50bd..9fe3b51 100644 (file)
@@ -208,7 +208,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
                size_t len = strlen(k_platform) + 1;
 
                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
-               if (__copy_to_user(u_platform, k_platform, len))
+               if (copy_to_user(u_platform, k_platform, len))
                        return -EFAULT;
        }
 
@@ -221,7 +221,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
                size_t len = strlen(k_base_platform) + 1;
 
                u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
-               if (__copy_to_user(u_base_platform, k_base_platform, len))
+               if (copy_to_user(u_base_platform, k_base_platform, len))
                        return -EFAULT;
        }
 
@@ -231,7 +231,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
        get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
        u_rand_bytes = (elf_addr_t __user *)
                       STACK_ALLOC(p, sizeof(k_rand_bytes));
-       if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+       if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
                return -EFAULT;
 
        /* Create the ELF interpreter info */
@@ -314,21 +314,21 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
                return -EFAULT;
 
        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
-       if (__put_user(argc, sp++))
+       if (put_user(argc, sp++))
                return -EFAULT;
 
        /* Populate list of argv pointers back to argv strings. */
        p = mm->arg_end = mm->arg_start;
        while (argc-- > 0) {
                size_t len;
-               if (__put_user((elf_addr_t)p, sp++))
+               if (put_user((elf_addr_t)p, sp++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       if (__put_user(0, sp++))
+       if (put_user(0, sp++))
                return -EFAULT;
        mm->arg_end = p;
 
@@ -336,14 +336,14 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
        mm->env_end = mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
-               if (__put_user((elf_addr_t)p, sp++))
+               if (put_user((elf_addr_t)p, sp++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       if (__put_user(0, sp++))
+       if (put_user(0, sp++))
                return -EFAULT;
        mm->env_end = p;
 
index aaf332d..0f45521 100644 (file)
@@ -536,7 +536,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
                platform_len = strlen(k_platform) + 1;
                sp -= platform_len;
                u_platform = (char __user *) sp;
-               if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
+               if (copy_to_user(u_platform, k_platform, platform_len) != 0)
                        return -EFAULT;
        }
 
@@ -551,7 +551,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
                platform_len = strlen(k_base_platform) + 1;
                sp -= platform_len;
                u_base_platform = (char __user *) sp;
-               if (__copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
+               if (copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
                        return -EFAULT;
        }
 
@@ -603,11 +603,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
        /* put the ELF interpreter info on the stack */
 #define NEW_AUX_ENT(id, val)                                           \
        do {                                                            \
-               struct { unsigned long _id, _val; } __user *ent;        \
+               struct { unsigned long _id, _val; } __user *ent, v;     \
                                                                        \
                ent = (void __user *) csp;                              \
-               __put_user((id), &ent[nr]._id);                         \
-               __put_user((val), &ent[nr]._val);                       \
+               v._id = (id);                                           \
+               v._val = (val);                                         \
+               if (copy_to_user(ent + nr, &v, sizeof(v)))              \
+                       return -EFAULT;                                 \
                nr++;                                                   \
        } while (0)
 
@@ -674,7 +676,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 
        /* stack argc */
        csp -= sizeof(unsigned long);
-       __put_user(bprm->argc, (unsigned long __user *) csp);
+       if (put_user(bprm->argc, (unsigned long __user *) csp))
+               return -EFAULT;
 
        BUG_ON(csp != sp);
 
@@ -688,25 +691,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 
        p = (char __user *) current->mm->arg_start;
        for (loop = bprm->argc; loop > 0; loop--) {
-               __put_user((elf_caddr_t) p, argv++);
+               if (put_user((elf_caddr_t) p, argv++))
+                       return -EFAULT;
                len = strnlen_user(p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       __put_user(NULL, argv);
+       if (put_user(NULL, argv))
+               return -EFAULT;
        current->mm->arg_end = (unsigned long) p;
 
        /* fill in the envv[] array */
        current->mm->env_start = (unsigned long) p;
        for (loop = bprm->envc; loop > 0; loop--) {
-               __put_user((elf_caddr_t)(unsigned long) p, envp++);
+               if (put_user((elf_caddr_t)(unsigned long) p, envp++))
+                       return -EFAULT;
                len = strnlen_user(p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       __put_user(NULL, envp);
+       if (put_user(NULL, envp))
+               return -EFAULT;
        current->mm->env_end = (unsigned long) p;
 
        mm->start_stack = (unsigned long) sp;
@@ -848,8 +855,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
 
                                tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
                                dyn = (Elf32_Dyn __user *)params->dynamic_addr;
-                               __get_user(d_tag, &dyn[tmp - 1].d_tag);
-                               if (d_tag != 0)
+                               if (get_user(d_tag, &dyn[tmp - 1].d_tag) ||
+                                   d_tag != 0)
                                        goto dynamic_error;
                                break;
                        }
index 87ce229..f2f9086 100644 (file)
@@ -138,35 +138,40 @@ static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start
        current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
        sp = (unsigned long __user *)current->mm->start_stack;
 
-       __put_user(bprm->argc, sp++);
+       if (put_user(bprm->argc, sp++))
+               return -EFAULT;
        if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
                unsigned long argv, envp;
                argv = (unsigned long)(sp + 2);
                envp = (unsigned long)(sp + 2 + bprm->argc + 1);
-               __put_user(argv, sp++);
-               __put_user(envp, sp++);
+               if (put_user(argv, sp++) || put_user(envp, sp++))
+                       return -EFAULT;
        }
 
        current->mm->arg_start = (unsigned long)p;
        for (i = bprm->argc; i > 0; i--) {
-               __put_user((unsigned long)p, sp++);
+               if (put_user((unsigned long)p, sp++))
+                       return -EFAULT;
                len = strnlen_user(p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       __put_user(0, sp++);
+       if (put_user(0, sp++))
+               return -EFAULT;
        current->mm->arg_end = (unsigned long)p;
 
        current->mm->env_start = (unsigned long) p;
        for (i = bprm->envc; i > 0; i--) {
-               __put_user((unsigned long)p, sp++);
+               if (put_user((unsigned long)p, sp++))
+                       return -EFAULT;
                len = strnlen_user(p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
-       __put_user(0, sp++);
+       if (put_user(0, sp++))
+               return -EFAULT;
        current->mm->env_end = (unsigned long)p;
 
        return 0;
@@ -996,7 +1001,8 @@ static int load_flat_binary(struct linux_binprm *bprm)
                        unsigned long __user *sp;
                        current->mm->start_stack -= sizeof(unsigned long);
                        sp = (unsigned long __user *)current->mm->start_stack;
-                       __put_user(start_addr, sp);
+                       if (put_user(start_addr, sp))
+                               return -EFAULT;
                        start_addr = libinfo.lib_list[i].entry;
                }
        }
index 78e41c7..df466ef 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/idr.h>
+#include <linux/uio.h>
 
 DEFINE_PER_CPU(int, eventfd_wake_count);
 
@@ -216,32 +217,32 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
 
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
-                           loff_t *ppos)
+static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct file *file = iocb->ki_filp;
        struct eventfd_ctx *ctx = file->private_data;
-       ssize_t res;
        __u64 ucnt = 0;
        DECLARE_WAITQUEUE(wait, current);
 
-       if (count < sizeof(ucnt))
+       if (iov_iter_count(to) < sizeof(ucnt))
                return -EINVAL;
-
        spin_lock_irq(&ctx->wqh.lock);
-       res = -EAGAIN;
-       if (ctx->count > 0)
-               res = sizeof(ucnt);
-       else if (!(file->f_flags & O_NONBLOCK)) {
+       if (!ctx->count) {
+               if ((file->f_flags & O_NONBLOCK) ||
+                   (iocb->ki_flags & IOCB_NOWAIT)) {
+                       spin_unlock_irq(&ctx->wqh.lock);
+                       return -EAGAIN;
+               }
                __add_wait_queue(&ctx->wqh, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (ctx->count > 0) {
-                               res = sizeof(ucnt);
+                       if (ctx->count)
                                break;
-                       }
                        if (signal_pending(current)) {
-                               res = -ERESTARTSYS;
-                               break;
+                               __remove_wait_queue(&ctx->wqh, &wait);
+                               __set_current_state(TASK_RUNNING);
+                               spin_unlock_irq(&ctx->wqh.lock);
+                               return -ERESTARTSYS;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
@@ -250,17 +251,14 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
-       if (likely(res > 0)) {
-               eventfd_ctx_do_read(ctx, &ucnt);
-               if (waitqueue_active(&ctx->wqh))
-                       wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
-       }
+       eventfd_ctx_do_read(ctx, &ucnt);
+       if (waitqueue_active(&ctx->wqh))
+               wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irq(&ctx->wqh.lock);
-
-       if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+       if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
                return -EFAULT;
 
-       return res;
+       return sizeof(ucnt);
 }
 
 static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -329,7 +327,7 @@ static const struct file_operations eventfd_fops = {
 #endif
        .release        = eventfd_release,
        .poll           = eventfd_poll,
-       .read           = eventfd_read,
+       .read_iter      = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
 };
@@ -406,6 +404,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
 static int do_eventfd(unsigned int count, int flags)
 {
        struct eventfd_ctx *ctx;
+       struct file *file;
        int fd;
 
        /* Check the EFD_* constants for consistency.  */
@@ -425,11 +424,24 @@ static int do_eventfd(unsigned int count, int flags)
        ctx->flags = flags;
        ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
 
-       fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
-                             O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+       flags &= EFD_SHARED_FCNTL_FLAGS;
+       flags |= O_RDWR;
+       fd = get_unused_fd_flags(flags);
        if (fd < 0)
-               eventfd_free_ctx(ctx);
+               goto err;
+
+       file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
+       if (IS_ERR(file)) {
+               put_unused_fd(fd);
+               fd = PTR_ERR(file);
+               goto err;
+       }
 
+       file->f_mode |= FMODE_NOWAIT;
+       fd_install(fd, file);
+       return fd;
+err:
+       eventfd_free_ctx(ctx);
        return fd;
 }
 
index 6fd1f6e..7df0f9f 100644 (file)
@@ -1557,6 +1557,9 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
 {
        bool rc;
 
+       if (lease->fl_lmops->lm_breaker_owns_lease
+                       && lease->fl_lmops->lm_breaker_owns_lease(lease))
+               return false;
        if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
                rc = false;
                goto trace;
index 7cd6424..f30ed40 100644 (file)
@@ -684,9 +684,6 @@ bool __is_local_mountpoint(struct dentry *dentry)
        struct mount *mnt;
        bool is_covered = false;
 
-       if (!d_mountpoint(dentry))
-               goto out;
-
        down_read(&namespace_sem);
        lock_ns_list(ns);
        list_for_each_entry(mnt, &ns->list, mnt_list) {
@@ -698,7 +695,7 @@ bool __is_local_mountpoint(struct dentry *dentry)
        }
        unlock_ns_list(ns);
        up_read(&namespace_sem);
-out:
+
        return is_covered;
 }
 
index a57e7c7..1b79dd5 100644 (file)
@@ -446,7 +446,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
-       ssize_t result = -EINVAL, requested;
+       ssize_t result, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
@@ -731,6 +731,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                nfs_list_remove_request(req);
                if (request_commit) {
                        kref_get(&req->wb_kref);
+                       memcpy(&req->wb_verf, &hdr->verf.verifier,
+                              sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                }
index 9638000..e87d500 100644 (file)
@@ -39,7 +39,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
 #include <linux/string.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/socket.h>
 #include <linux/seq_file.h>
 #include <linux/inet.h>
index b9d0921..0bf1f83 100644 (file)
@@ -833,6 +833,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
                do_update |= cache_validity & NFS_INO_INVALID_ATIME;
        if (request_mask & (STATX_CTIME|STATX_MTIME))
                do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE;
+       if (request_mask & STATX_BLOCKS)
+               do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
        if (do_update) {
                /* Update the attribute cache */
                if (!(server->flags & NFS_MOUNT_NOAC))
@@ -1764,7 +1766,8 @@ out_noforce:
        status = nfs_post_op_update_inode_locked(inode, fattr,
                        NFS_INO_INVALID_CHANGE
                        | NFS_INO_INVALID_CTIME
-                       | NFS_INO_INVALID_MTIME);
+                       | NFS_INO_INVALID_MTIME
+                       | NFS_INO_INVALID_BLOCKS);
        return status;
 }
 
@@ -1871,7 +1874,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
        nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
                        | NFS_INO_INVALID_ATIME
                        | NFS_INO_REVAL_FORCED
-                       | NFS_INO_REVAL_PAGECACHE);
+                       | NFS_INO_REVAL_PAGECACHE
+                       | NFS_INO_INVALID_BLOCKS);
 
        /* Do atomic weak cache consistency updates */
        nfs_wcc_update_inode(inode, fattr);
@@ -2033,8 +2037,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
        } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
                inode->i_blocks = fattr->du.nfs2.blocks;
-       else
+       else {
+               nfsi->cache_validity |= save_cache_validity &
+                               (NFS_INO_INVALID_BLOCKS
+                               | NFS_INO_REVAL_FORCED);
                cache_revalidated = false;
+       }
 
        /* Update attrtimeo value if we're out of the unstable period */
        if (attr_changed) {
index a46d1d5..2397cee 100644 (file)
@@ -179,11 +179,11 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
        if (nfs_lookup_is_soft_revalidate(dentry))
                task_flags |= RPC_TASK_TIMEOUT;
 
-       dprintk("NFS call  lookup %pd2\n", dentry);
        res.dir_attr = nfs_alloc_fattr();
        if (res.dir_attr == NULL)
                return -ENOMEM;
 
+       dprintk("NFS call  lookup %pd2\n", dentry);
        nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
        nfs_refresh_inode(dir, res.dir_attr);
index 9056f3d..e32717f 100644 (file)
@@ -7909,7 +7909,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
 }
 
 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
-       .rpc_call_done =  &nfs4_bind_one_conn_to_session_done,
+       .rpc_call_done =  nfs4_bind_one_conn_to_session_done,
 };
 
 /*
index 7e7a97a..547cec7 100644 (file)
@@ -961,6 +961,97 @@ TRACE_EVENT(nfs_readpage_done,
                )
 );
 
+TRACE_EVENT(nfs_readpage_short,
+               TP_PROTO(
+                       const struct rpc_task *task,
+                       const struct nfs_pgio_header *hdr
+               ),
+
+               TP_ARGS(task, hdr),
+
+               TP_STRUCT__entry(
+                       __field(dev_t, dev)
+                       __field(u32, fhandle)
+                       __field(u64, fileid)
+                       __field(loff_t, offset)
+                       __field(u32, arg_count)
+                       __field(u32, res_count)
+                       __field(bool, eof)
+                       __field(int, status)
+               ),
+
+               TP_fast_assign(
+                       const struct inode *inode = hdr->inode;
+                       const struct nfs_inode *nfsi = NFS_I(inode);
+                       const struct nfs_fh *fh = hdr->args.fh ?
+                                                 hdr->args.fh : &nfsi->fh;
+
+                       __entry->status = task->tk_status;
+                       __entry->offset = hdr->args.offset;
+                       __entry->arg_count = hdr->args.count;
+                       __entry->res_count = hdr->res.count;
+                       __entry->eof = hdr->res.eof;
+                       __entry->dev = inode->i_sb->s_dev;
+                       __entry->fileid = nfsi->fileid;
+                       __entry->fhandle = nfs_fhandle_hash(fh);
+               ),
+
+               TP_printk(
+                       "fileid=%02x:%02x:%llu fhandle=0x%08x "
+                       "offset=%lld count=%u res=%u status=%d%s",
+                       MAJOR(__entry->dev), MINOR(__entry->dev),
+                       (unsigned long long)__entry->fileid,
+                       __entry->fhandle,
+                       (long long)__entry->offset, __entry->arg_count,
+                       __entry->res_count, __entry->status,
+                       __entry->eof ? " eof" : ""
+               )
+);
+
+TRACE_EVENT(nfs_pgio_error,
+       TP_PROTO(
+               const struct nfs_pgio_header *hdr,
+               int error,
+               loff_t pos
+       ),
+
+       TP_ARGS(hdr, error, pos),
+
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(u32, fhandle)
+               __field(u64, fileid)
+               __field(loff_t, offset)
+               __field(u32, arg_count)
+               __field(u32, res_count)
+               __field(loff_t, pos)
+               __field(int, status)
+       ),
+
+       TP_fast_assign(
+               const struct inode *inode = hdr->inode;
+               const struct nfs_inode *nfsi = NFS_I(inode);
+               const struct nfs_fh *fh = hdr->args.fh ?
+                                         hdr->args.fh : &nfsi->fh;
+
+               __entry->status = error;
+               __entry->offset = hdr->args.offset;
+               __entry->arg_count = hdr->args.count;
+               __entry->res_count = hdr->res.count;
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->fileid = nfsi->fileid;
+               __entry->fhandle = nfs_fhandle_hash(fh);
+       ),
+
+       TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
+                 "offset=%lld count=%u res=%u pos=%llu status=%d",
+               MAJOR(__entry->dev), MINOR(__entry->dev),
+               (unsigned long long)__entry->fileid, __entry->fhandle,
+               (long long)__entry->offset, __entry->arg_count, __entry->res_count,
+               __entry->pos, __entry->status
+       )
+);
+
 TRACE_DEFINE_ENUM(NFS_UNSTABLE);
 TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
 TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
@@ -1312,7 +1403,12 @@ TRACE_EVENT(nfs_xdr_status,
                        __field(unsigned int, task_id)
                        __field(unsigned int, client_id)
                        __field(u32, xid)
+                       __field(int, version)
                        __field(unsigned long, error)
+                       __string(program,
+                                xdr->rqst->rq_task->tk_client->cl_program->name)
+                       __string(procedure,
+                                xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
                ),
 
                TP_fast_assign(
@@ -1322,13 +1418,19 @@ TRACE_EVENT(nfs_xdr_status,
                        __entry->task_id = task->tk_pid;
                        __entry->client_id = task->tk_client->cl_clid;
                        __entry->xid = be32_to_cpu(rqstp->rq_xid);
+                       __entry->version = task->tk_client->cl_vers;
                        __entry->error = error;
+                       __assign_str(program,
+                                    task->tk_client->cl_program->name)
+                       __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
                ),
 
                TP_printk(
-                       "task:%u@%d xid=0x%08x error=%ld (%s)",
+                       "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)",
                        __entry->task_id, __entry->client_id, __entry->xid,
-                       -__entry->error, nfs_show_status(__entry->error)
+                       __get_str(program), __entry->version,
+                       __get_str(procedure), -__entry->error,
+                       nfs_show_status(__entry->error)
                )
 );
 
index 6ca421c..6ea4cac 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "internal.h"
 #include "pnfs.h"
+#include "nfstrace.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
 
@@ -64,6 +65,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
 {
        unsigned int new = pos - hdr->io_start;
 
+       trace_nfs_pgio_error(hdr, error, pos);
        if (hdr->good_bytes > new) {
                hdr->good_bytes = new;
                clear_bit(NFS_IOHDR_EOF, &hdr->flags);
index 13b22e8..eb854f1 100644 (file)
@@ -264,6 +264,8 @@ static void nfs_readpage_retry(struct rpc_task *task,
 
        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
+       trace_nfs_readpage_short(task, hdr);
+
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
index f1b2741..ebcbdc4 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2019 Hammerspace Inc
  */
index 10ec5ec..65c331f 100644 (file)
@@ -78,6 +78,8 @@ enum {
 /* Checksum this amount of the request */
 #define RC_CSUMLEN             (256U)
 
+int    nfsd_drc_slab_create(void);
+void   nfsd_drc_slab_free(void);
 int    nfsd_reply_cache_init(struct nfsd_net *);
 void   nfsd_reply_cache_shutdown(struct nfsd_net *);
 int    nfsd_cache_lookup(struct svc_rqst *);
index 09aa545..9217cb6 100644 (file)
@@ -139,7 +139,6 @@ struct nfsd_net {
         * Duplicate reply cache
         */
        struct nfsd_drc_bucket   *drc_hashtbl;
-       struct kmem_cache        *drc_slab;
 
        /* max number of entries allowed in the cache */
        unsigned int             max_drc_entries;
index 5cf9132..7fbe984 100644 (file)
@@ -38,6 +38,7 @@
 #include "nfsd.h"
 #include "state.h"
 #include "netns.h"
+#include "trace.h"
 #include "xdr4cb.h"
 #include "xdr4.h"
 
@@ -904,16 +905,20 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
 
        if (clp->cl_minorversion == 0) {
                if (!clp->cl_cred.cr_principal &&
-                               (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
+                   (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
+                       trace_nfsd_cb_setup_err(clp, -EINVAL);
                        return -EINVAL;
+               }
                args.client_name = clp->cl_cred.cr_principal;
                args.prognumber = conn->cb_prog;
                args.protocol = XPRT_TRANSPORT_TCP;
                args.authflavor = clp->cl_cred.cr_flavor;
                clp->cl_cb_ident = conn->cb_ident;
        } else {
-               if (!conn->cb_xprt)
+               if (!conn->cb_xprt) {
+                       trace_nfsd_cb_setup_err(clp, -EINVAL);
                        return -EINVAL;
+               }
                clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
                clp->cl_cb_session = ses;
                args.bc_xprt = conn->cb_xprt;
@@ -925,32 +930,27 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
        /* Create RPC client */
        client = rpc_create(&args);
        if (IS_ERR(client)) {
-               dprintk("NFSD: couldn't create callback client: %ld\n",
-                       PTR_ERR(client));
+               trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
                return PTR_ERR(client);
        }
        cred = get_backchannel_cred(clp, client, ses);
        if (!cred) {
+               trace_nfsd_cb_setup_err(clp, -ENOMEM);
                rpc_shutdown_client(client);
                return -ENOMEM;
        }
        clp->cl_cb_client = client;
        clp->cl_cb_cred = cred;
+       trace_nfsd_cb_setup(clp);
        return 0;
 }
 
-static void warn_no_callback_path(struct nfs4_client *clp, int reason)
-{
-       dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
-               (int)clp->cl_name.len, clp->cl_name.data, reason);
-}
-
 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
 {
        if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
                return;
        clp->cl_cb_state = NFSD4_CB_DOWN;
-       warn_no_callback_path(clp, reason);
+       trace_nfsd_cb_state(clp);
 }
 
 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
@@ -958,17 +958,20 @@ static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
        if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
                return;
        clp->cl_cb_state = NFSD4_CB_FAULT;
-       warn_no_callback_path(clp, reason);
+       trace_nfsd_cb_state(clp);
 }
 
 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 {
        struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
 
+       trace_nfsd_cb_done(clp, task->tk_status);
        if (task->tk_status)
                nfsd4_mark_cb_down(clp, task->tk_status);
-       else
+       else {
                clp->cl_cb_state = NFSD4_CB_UP;
+               trace_nfsd_cb_state(clp);
+       }
 }
 
 static void nfsd4_cb_probe_release(void *calldata)
@@ -993,6 +996,7 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
 void nfsd4_probe_callback(struct nfs4_client *clp)
 {
        clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+       trace_nfsd_cb_state(clp);
        set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
        nfsd4_run_cb(&clp->cl_cb_null);
 }
@@ -1009,6 +1013,7 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
        spin_lock(&clp->cl_lock);
        memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
        spin_unlock(&clp->cl_lock);
+       trace_nfsd_cb_state(clp);
 }
 
 /*
@@ -1165,8 +1170,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
        struct nfsd4_callback *cb = calldata;
        struct nfs4_client *clp = cb->cb_clp;
 
-       dprintk("%s: minorversion=%d\n", __func__,
-               clp->cl_minorversion);
+       trace_nfsd_cb_done(clp, task->tk_status);
 
        if (!nfsd4_cb_sequence_done(task, cb))
                return;
@@ -1271,6 +1275,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
         * kill the old client:
         */
        if (clp->cl_cb_client) {
+               trace_nfsd_cb_shutdown(clp);
                rpc_shutdown_client(clp->cl_cb_client);
                clp->cl_cb_client = NULL;
                put_cred(clp->cl_cb_cred);
@@ -1301,6 +1306,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
        err = setup_callback_client(clp, &conn, ses);
        if (err) {
                nfsd4_mark_cb_down(clp, err);
+               if (c)
+                       svc_xprt_put(c->cn_xprt);
                return;
        }
 }
@@ -1314,6 +1321,8 @@ nfsd4_run_cb_work(struct work_struct *work)
        struct rpc_clnt *clnt;
        int flags;
 
+       trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
+
        if (cb->cb_need_restart) {
                cb->cb_need_restart = false;
        } else {
index 0e75f7f..a09c35f 100644 (file)
@@ -1155,7 +1155,7 @@ extern void nfs_sb_deactive(struct super_block *sb);
 
 #define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
 
-/**
+/*
  * Support one copy source server for now.
  */
 static __be32
@@ -1245,10 +1245,9 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
        mntput(ss_mnt);
 }
 
-/**
- * nfsd4_setup_inter_ssc
- *
+/*
  * Verify COPY destination stateid.
+ *
  * Connect to the source server with NFSv4.1.
  * Create the source struct file for nfsd_copy_range.
  * Called with COPY cstate:
@@ -2302,6 +2301,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
        }
        check_if_stalefh_allowed(args);
 
+       rqstp->rq_lease_breaker = (void **)&cstate->clp;
+
        trace_nfsd_compound(rqstp, args->opcnt);
        while (!status && resp->opcnt < args->opcnt) {
                op = &args->ops[resp->opcnt++];
index c107caa..bb3d2c3 100644 (file)
@@ -51,6 +51,7 @@
 #include "netns.h"
 #include "pnfs.h"
 #include "filecache.h"
+#include "trace.h"
 
 #define NFSDDBG_FACILITY                NFSDDBG_PROC
 
@@ -167,9 +168,6 @@ renew_client_locked(struct nfs4_client *clp)
                return;
        }
 
-       dprintk("renewing client (clientid %08x/%08x)\n",
-                       clp->cl_clientid.cl_boot,
-                       clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &nn->client_lru);
        clp->cl_time = ktime_get_boottime_seconds();
 }
@@ -1922,8 +1920,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
         */
        if (clid->cl_boot == (u32)nn->boot_time)
                return 0;
-       dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
-               clid->cl_boot, clid->cl_id, nn->boot_time);
+       trace_nfsd_clid_stale(clid);
        return 1;
 }
 
@@ -2406,6 +2403,11 @@ static void states_stop(struct seq_file *s, void *v)
        spin_unlock(&clp->cl_lock);
 }
 
+static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
+{
+         seq_printf(s, "filename: \"%pD2\"", f->nf_file);
+}
+
 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
 {
        struct inode *inode = f->nf_inode;
@@ -2422,6 +2424,12 @@ static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
        seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
 }
 
+static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
+{
+       seq_printf(s, "0x%.8x", stid->si_generation);
+       seq_printf(s, "%12phN", &stid->si_opaque);
+}
+
 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
 {
        struct nfs4_ol_stateid *ols;
@@ -2437,7 +2445,9 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
        nf = st->sc_file;
        file = find_any_file(nf);
 
-       seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
+       seq_printf(s, "- ");
+       nfs4_show_stateid(s, &st->sc_stateid);
+       seq_printf(s, ": { type: open, ");
 
        access = bmap_to_share_mode(ols->st_access_bmap);
        deny   = bmap_to_share_mode(ols->st_deny_bmap);
@@ -2451,6 +2461,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
 
        nfs4_show_superblock(s, file);
        seq_printf(s, ", ");
+       nfs4_show_fname(s, file);
+       seq_printf(s, ", ");
        nfs4_show_owner(s, oo);
        seq_printf(s, " }\n");
        nfsd_file_put(file);
@@ -2470,7 +2482,9 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
        nf = st->sc_file;
        file = find_any_file(nf);
 
-       seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
+       seq_printf(s, "- ");
+       nfs4_show_stateid(s, &st->sc_stateid);
+       seq_printf(s, ": { type: lock, ");
 
        /*
         * Note: a lock stateid isn't really the same thing as a lock,
@@ -2482,6 +2496,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
        nfs4_show_superblock(s, file);
        /* XXX: open stateid? */
        seq_printf(s, ", ");
+       nfs4_show_fname(s, file);
+       seq_printf(s, ", ");
        nfs4_show_owner(s, oo);
        seq_printf(s, " }\n");
        nfsd_file_put(file);
@@ -2499,7 +2515,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
        nf = st->sc_file;
        file = nf->fi_deleg_file;
 
-       seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
+       seq_printf(s, "- ");
+       nfs4_show_stateid(s, &st->sc_stateid);
+       seq_printf(s, ": { type: deleg, ");
 
        /* Kinda dead code as long as we only support read delegs: */
        seq_printf(s, "access: %s, ",
@@ -2508,6 +2526,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
        /* XXX: lease time, whether it's being recalled. */
 
        nfs4_show_superblock(s, file);
+       seq_printf(s, ", ");
+       nfs4_show_fname(s, file);
        seq_printf(s, " }\n");
 
        return 0;
@@ -2521,11 +2541,15 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
        ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
        file = ls->ls_file;
 
-       seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
+       seq_printf(s, "- ");
+       nfs4_show_stateid(s, &st->sc_stateid);
+       seq_printf(s, ": { type: layout, ");
 
        /* XXX: What else would be useful? */
 
        nfs4_show_superblock(s, file);
+       seq_printf(s, ", ");
+       nfs4_show_fname(s, file);
        seq_printf(s, " }\n");
 
        return 0;
@@ -2845,14 +2869,12 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
        conn->cb_prog = se->se_callback_prog;
        conn->cb_ident = se->se_callback_ident;
        memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
+       trace_nfsd_cb_args(clp, conn);
        return;
 out_err:
        conn->cb_addr.ss_family = AF_UNSPEC;
        conn->cb_addrlen = 0;
-       dprintk("NFSD: this client (clientid %08x/%08x) "
-               "will not receive delegations\n",
-               clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
-
+       trace_nfsd_cb_nodelegs(clp);
        return;
 }
 
@@ -3458,6 +3480,45 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
        return nfs_ok;
 }
 
+static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
+{
+       struct nfsd4_conn *c;
+
+       list_for_each_entry(c, &s->se_conns, cn_persession) {
+               if (c->cn_xprt == xpt) {
+                       return c;
+               }
+       }
+       return NULL;
+}
+
+static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
+                               struct nfsd4_session *session, u32 req)
+{
+       struct nfs4_client *clp = session->se_client;
+       struct svc_xprt *xpt = rqst->rq_xprt;
+       struct nfsd4_conn *c;
+       __be32 status;
+
+       /* Following the last paragraph of RFC 5661 Section 18.34.3: */
+       spin_lock(&clp->cl_lock);
+       c = __nfsd4_find_conn(xpt, session);
+       if (!c)
+               status = nfserr_noent;
+       else if (req == c->cn_flags)
+               status = nfs_ok;
+       else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
+                               c->cn_flags != NFS4_CDFC4_BACK)
+               status = nfs_ok;
+       else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
+                               c->cn_flags != NFS4_CDFC4_FORE)
+               status = nfs_ok;
+       else
+               status = nfserr_inval;
+       spin_unlock(&clp->cl_lock);
+       return status;
+}
+
 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
                     struct nfsd4_compound_state *cstate,
                     union nfsd4_op_u *u)
@@ -3479,6 +3540,9 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
        status = nfserr_wrong_cred;
        if (!nfsd4_mach_creds_match(session->se_client, rqstp))
                goto out;
+       status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
+       if (status == nfs_ok || status == nfserr_inval)
+               goto out;
        status = nfsd4_map_bcts_dir(&bcts->dir);
        if (status)
                goto out;
@@ -3544,18 +3608,6 @@ out:
        return status;
 }
 
-static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
-{
-       struct nfsd4_conn *c;
-
-       list_for_each_entry(c, &s->se_conns, cn_persession) {
-               if (c->cn_xprt == xpt) {
-                       return c;
-               }
-       }
-       return NULL;
-}
-
 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
 {
        struct nfs4_client *clp = ses->se_client;
@@ -3879,23 +3931,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                if (clp_used_exchangeid(conf))
                        goto out;
                if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
-                       char addr_str[INET6_ADDRSTRLEN];
-                       rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
-                                sizeof(addr_str));
-                       dprintk("NFSD: setclientid: string in use by client "
-                               "at %s\n", addr_str);
+                       trace_nfsd_clid_inuse_err(conf);
                        goto out;
                }
        }
        unconf = find_unconfirmed_client_by_name(&clname, nn);
        if (unconf)
                unhash_client_locked(unconf);
+       /* We need to handle only case 1: probable callback update */
        if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
-               /* case 1: probable callback update */
                copy_clid(new, conf);
                gen_confirm(new, nn);
-       } else /* case 4 (new client) or cases 2, 3 (client reboot): */
-               ;
+       }
        new->cl_minorversion = 0;
        gen_callback(new, setclid, rqstp);
        add_to_unconfirmed(new);
@@ -4076,7 +4123,6 @@ out_free_openowner_slab:
 out_free_client_slab:
        kmem_cache_destroy(client_slab);
 out:
-       dprintk("nfsd4: out of memory while initializing nfsv4\n");
        return -ENOMEM;
 }
 
@@ -4508,6 +4554,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
        struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
        struct nfs4_file *fp = dp->dl_stid.sc_file;
 
+       trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
+
        /*
         * We don't want the locks code to timeout the lease for us;
         * we'll remove it ourself if a delegation isn't returned
@@ -4522,6 +4570,19 @@ nfsd_break_deleg_cb(struct file_lock *fl)
        return ret;
 }
 
+static bool nfsd_breaker_owns_lease(struct file_lock *fl)
+{
+       struct nfs4_delegation *dl = fl->fl_owner;
+       struct svc_rqst *rqst;
+       struct nfs4_client *clp;
+
+       if (!i_am_nfsd())
+               return NULL;
+       rqst = kthread_data(current);
+       clp = *(rqst->rq_lease_breaker);
+       return dl->dl_stid.sc_client == clp;
+}
+
 static int
 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
                     struct list_head *dispose)
@@ -4533,6 +4594,7 @@ nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
 }
 
 static const struct lock_manager_operations nfsd_lease_mng_ops = {
+       .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
        .lm_break = nfsd_break_deleg_cb,
        .lm_change = nfsd_change_deleg_cb,
 };
@@ -5018,8 +5080,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
 
        memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
 
-       dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
-               STATEID_VAL(&dp->dl_stid.sc_stateid));
+       trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid);
        open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
        nfs4_put_stid(&dp->dl_stid);
        return;
@@ -5136,9 +5197,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        nfs4_open_delegation(current_fh, open, stp);
 nodeleg:
        status = nfs_ok;
-
-       dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
-               STATEID_VAL(&stp->st_stid.sc_stateid));
+       trace_nfsd_deleg_none(&stp->st_stid.sc_stateid);
 out:
        /* 4.1 client trying to upgrade/downgrade delegation? */
        if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
@@ -5192,8 +5251,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-       dprintk("process_renew(%08x/%08x): starting\n", 
-                       clid->cl_boot, clid->cl_id);
+       trace_nfsd_clid_renew(clid);
        status = lookup_clientid(clid, cstate, nn, false);
        if (status)
                goto out;
@@ -5214,6 +5272,7 @@ nfsd4_end_grace(struct nfsd_net *nn)
        if (nn->grace_ended)
                return;
 
+       trace_nfsd_grace_complete(nn);
        nn->grace_ended = true;
        /*
         * If the server goes down again right now, an NFSv4
@@ -5279,13 +5338,10 @@ nfs4_laundromat(struct nfsd_net *nn)
        copy_stateid_t *cps_t;
        int i;
 
-       dprintk("NFSD: laundromat service - starting\n");
-
        if (clients_still_reclaiming(nn)) {
                new_timeo = 0;
                goto out;
        }
-       dprintk("NFSD: end of grace period\n");
        nfsd4_end_grace(nn);
        INIT_LIST_HEAD(&reaplist);
 
@@ -5307,8 +5363,7 @@ nfs4_laundromat(struct nfsd_net *nn)
                        break;
                }
                if (mark_client_expired_locked(clp)) {
-                       dprintk("NFSD: client in use (clientid %08x)\n",
-                               clp->cl_clientid.cl_id);
+                       trace_nfsd_clid_expired(&clp->cl_clientid);
                        continue;
                }
                list_add(&clp->cl_lru, &reaplist);
@@ -5316,8 +5371,7 @@ nfs4_laundromat(struct nfsd_net *nn)
        spin_unlock(&nn->client_lock);
        list_for_each_safe(pos, next, &reaplist) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
-               dprintk("NFSD: purging unused client (clientid %08x)\n",
-                       clp->cl_clientid.cl_id);
+               trace_nfsd_clid_purged(&clp->cl_clientid);
                list_del_init(&clp->cl_lru);
                expire_client(clp);
        }
@@ -5407,7 +5461,6 @@ laundromat_main(struct work_struct *laundry)
                                           laundromat_work);
 
        t = nfs4_laundromat(nn);
-       dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
        queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
@@ -5948,8 +6001,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
        struct nfs4_stid *s;
        struct nfs4_ol_stateid *stp = NULL;
 
-       dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
-               seqid, STATEID_VAL(stateid));
+       trace_nfsd_preprocess(seqid, stateid);
 
        *stpp = NULL;
        status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
@@ -6018,9 +6070,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);
-       dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
-               __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
-
+       trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
        nfsd4_client_record_create(oo->oo_owner.so_client);
        status = nfs_ok;
 put_stateid:
@@ -7072,7 +7122,7 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
        unsigned int strhashval;
        struct nfs4_client_reclaim *crp;
 
-       dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
+       trace_nfsd_clid_reclaim(nn, name.len, name.data);
        crp = alloc_reclaim();
        if (crp) {
                strhashval = clientstr_hashval(name);
@@ -7122,7 +7172,7 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
        unsigned int strhashval;
        struct nfs4_client_reclaim *crp = NULL;
 
-       dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);
+       trace_nfsd_clid_find(nn, name.len, name.data);
 
        strhashval = clientstr_hashval(name);
        list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
@@ -7686,6 +7736,9 @@ nfsd_recall_delegations(struct list_head *reaplist)
        list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
                list_del_init(&dp->dl_recall_lru);
                clp = dp->dl_stid.sc_client;
+
+               trace_nfsd_deleg_recall(&dp->dl_stid.sc_stateid);
+
                /*
                 * We skipped all entries that had a zero dl_time before,
                 * so we can now reset the dl_time back to 0. If a delegation
@@ -7868,6 +7921,7 @@ nfs4_state_start_net(struct net *net)
                goto skip_grace;
        printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
               nn->nfsd4_grace, net->ns.inum);
+       trace_nfsd_grace_start(nn);
        queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
        return 0;
 
index 96352ab..0a0cf1f 100644 (file)
@@ -20,8 +20,7 @@
 
 #include "nfsd.h"
 #include "cache.h"
-
-#define NFSDDBG_FACILITY       NFSDDBG_REPCACHE
+#include "trace.h"
 
 /*
  * We use this value to determine the number of hash buckets from the max
@@ -36,6 +35,8 @@ struct nfsd_drc_bucket {
        spinlock_t cache_lock;
 };
 
+static struct kmem_cache       *drc_slab;
+
 static int     nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
@@ -95,7 +96,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
 {
        struct svc_cacherep     *rp;
 
-       rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
+       rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
@@ -129,7 +130,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
-       kmem_cache_free(nn->drc_slab, rp);
+       kmem_cache_free(drc_slab, rp);
 }
 
 static void
@@ -141,6 +142,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
        spin_unlock(&b->cache_lock);
 }
 
+int nfsd_drc_slab_create(void)
+{
+       drc_slab = kmem_cache_create("nfsd_drc",
+                               sizeof(struct svc_cacherep), 0, 0, NULL);
+       return drc_slab ? 0: -ENOMEM;
+}
+
+void nfsd_drc_slab_free(void)
+{
+       kmem_cache_destroy(drc_slab);
+}
+
 int nfsd_reply_cache_init(struct nfsd_net *nn)
 {
        unsigned int hashsize;
@@ -159,18 +172,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
        if (status)
                goto out_nomem;
 
-       nn->drc_slab = kmem_cache_create("nfsd_drc",
-                               sizeof(struct svc_cacherep), 0, 0, NULL);
-       if (!nn->drc_slab)
-               goto out_shrinker;
-
        nn->drc_hashtbl = kcalloc(hashsize,
                                sizeof(*nn->drc_hashtbl), GFP_KERNEL);
        if (!nn->drc_hashtbl) {
                nn->drc_hashtbl = vzalloc(array_size(hashsize,
                                                 sizeof(*nn->drc_hashtbl)));
                if (!nn->drc_hashtbl)
-                       goto out_slab;
+                       goto out_shrinker;
        }
 
        for (i = 0; i < hashsize; i++) {
@@ -180,8 +188,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
        nn->drc_hashsize = hashsize;
 
        return 0;
-out_slab:
-       kmem_cache_destroy(nn->drc_slab);
 out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
 out_nomem:
@@ -209,8 +215,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
 
-       kmem_cache_destroy(nn->drc_slab);
-       nn->drc_slab = NULL;
 }
 
 /*
@@ -323,8 +327,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
                        const struct svc_cacherep *rp, struct nfsd_net *nn)
 {
        if (key->c_key.k_xid == rp->c_key.k_xid &&
-           key->c_key.k_csum != rp->c_key.k_csum)
+           key->c_key.k_csum != rp->c_key.k_csum) {
                ++nn->payload_misses;
+               trace_nfsd_drc_mismatch(nn, key, rp);
+       }
 
        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
 }
@@ -377,15 +383,22 @@ out:
        return ret;
 }
 
-/*
+/**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
+ *
  * Try to find an entry matching the current call in the cache. When none
  * is found, we try to grab the oldest expired entry off the LRU list. If
  * a suitable one isn't there, then drop the cache_lock and allocate a
  * new one, then search again in case one got inserted while this thread
  * didn't hold the lock.
+ *
+ * Return values:
+ *   %RC_DOIT: Process the request normally
+ *   %RC_REPLY: Reply from cache
+ *   %RC_DROPIT: Do not process the request further
  */
-int
-nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep     *rp, *found;
@@ -399,7 +412,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
-               return rtn;
+               goto out;
        }
 
        csum = nfsd_cache_csum(rqstp);
@@ -409,10 +422,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
-       if (!rp) {
-               dprintk("nfsd: unable to allocate DRC entry!\n");
-               return rtn;
-       }
+       if (!rp)
+               goto out;
 
        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
@@ -431,8 +442,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 
        /* go ahead and prune the cache */
        prune_bucket(b, nn);
- out:
+
+out_unlock:
        spin_unlock(&b->cache_lock);
+out:
        return rtn;
 
 found_entry:
@@ -442,13 +455,13 @@ found_entry:
 
        /* Request being processed */
        if (rp->c_state == RC_INPROG)
-               goto out;
+               goto out_trace;
 
        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
-               goto out;
+               goto out_trace;
 
        /* Compose RPC reply header */
        switch (rp->c_type) {
@@ -460,21 +473,26 @@ found_entry:
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
-                       goto out;       /* should not happen */
+                       goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
-               printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
-               nfsd_reply_cache_free_locked(b, rp, nn);
+               WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }
 
-       goto out;
+out_trace:
+       trace_nfsd_drc_found(nn, rqstp, rtn);
+       goto out_unlock;
 }
 
-/*
- * Update a cache entry. This is called from nfsd_dispatch when
- * the procedure has been executed and the complete reply is in
- * rqstp->rq_res.
+/**
+ * nfsd_cache_update - Update an entry in the duplicate reply cache.
+ * @rqstp: svc_rqst with a finished Reply
+ * @cachetype: which cache to update
+ * @statp: Reply's status code
+ *
+ * This is called from nfsd_dispatch when the procedure has been
+ * executed and the complete reply is in rqstp->rq_res.
  *
  * We're copying around data here rather than swapping buffers because
  * the toplevel loop requires max-sized buffers, which would be a waste
@@ -487,8 +505,7 @@ found_entry:
  * nfsd failed to encode a reply that otherwise would have been cached.
  * In this case, nfsd_cache_update is called with statp == NULL.
  */
-void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 {
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
index 3bb2db9..b68e966 100644 (file)
@@ -238,7 +238,7 @@ static inline struct net *netns(struct file *file)
        return file_inode(file)->i_sb->s_fs_info;
 }
 
-/**
+/*
  * write_unlock_ip - Release all locks used by a client
  *
  * Experimental.
@@ -277,7 +277,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
        return nlmsvc_unlock_all_by_ip(sap);
 }
 
-/**
+/*
  * write_unlock_fs - Release all locks on a local file system
  *
  * Experimental.
@@ -327,7 +327,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
        return error;
 }
 
-/**
+/*
  * write_filehandle - Get a variable-length NFS file handle by path
  *
  * On input, the buffer contains a '\n'-terminated C string comprised of
@@ -402,7 +402,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
        return mesg - buf;      
 }
 
-/**
+/*
  * write_threads - Start NFSD, or report the current number of running threads
  *
  * Input:
@@ -452,7 +452,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
        return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
 }
 
-/**
+/*
  * write_pool_threads - Set or report the current number of threads per pool
  *
  * Input:
@@ -661,7 +661,7 @@ out:
        return tlen + len;
 }
 
-/**
+/*
  * write_versions - Set or report the available NFS protocol versions
  *
  * Input:
@@ -811,7 +811,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
        return -EINVAL;
 }
 
-/**
+/*
  * write_ports - Pass a socket file descriptor or transport name to listen on
  *
  * Input:
@@ -867,7 +867,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
 
 int nfsd_max_blksize;
 
-/**
+/*
  * write_maxblksize - Set or report the current NFS blksize
  *
  * Input:
@@ -917,7 +917,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
                                                        nfsd_max_blksize);
 }
 
-/**
+/*
  * write_maxconn - Set or report the current max number of connections
  *
  * Input:
@@ -998,7 +998,7 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
        return rv;
 }
 
-/**
+/*
  * write_leasetime - Set or report the current NFSv4 lease time
  *
  * Input:
@@ -1025,7 +1025,7 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
        return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
 }
 
-/**
+/*
  * write_gracetime - Set or report current NFSv4 grace period time
  *
  * As above, but sets the time of the NFSv4 grace period.
@@ -1069,7 +1069,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
                                                        nfs4_recoverydir());
 }
 
-/**
+/*
  * write_recoverydir - Set or report the pathname of the recovery directory
  *
  * Input:
@@ -1101,7 +1101,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
        return rv;
 }
 
-/**
+/*
  * write_v4_end_grace - release grace period for nfsd's v4.x lock manager
  *
  * Input:
@@ -1533,6 +1533,9 @@ static int __init init_nfsd(void)
                goto out_free_slabs;
        nfsd_fault_inject_init(); /* nfsd fault injection controls */
        nfsd_stat_init();       /* Statistics */
+       retval = nfsd_drc_slab_create();
+       if (retval)
+               goto out_free_stat;
        nfsd_lockd_init();      /* lockd->nfsd callbacks */
        retval = create_proc_exports_entry();
        if (retval)
@@ -1546,6 +1549,8 @@ out_free_all:
        remove_proc_entry("fs/nfs", NULL);
 out_free_lockd:
        nfsd_lockd_shutdown();
+       nfsd_drc_slab_free();
+out_free_stat:
        nfsd_stat_shutdown();
        nfsd_fault_inject_cleanup();
        nfsd4_exit_pnfs();
@@ -1560,6 +1565,7 @@ out_unregister_pernet:
 
 static void __exit exit_nfsd(void)
 {
+       nfsd_drc_slab_free();
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
        nfsd_stat_shutdown();
index 2ab5569..36cdd81 100644 (file)
@@ -88,6 +88,8 @@ int           nfsd_pool_stats_release(struct inode *, struct file *);
 
 void           nfsd_destroy(struct net *net);
 
+bool           i_am_nfsd(void);
+
 struct nfsdfs_client {
        struct kref cl_ref;
        void (*cl_release)(struct kref *kref);
index ca9fd34..b603dfc 100644 (file)
@@ -601,6 +601,11 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
        .svo_module             = THIS_MODULE,
 };
 
+bool i_am_nfsd(void)
+{
+       return kthread_func(current) == nfsd;
+}
+
 int nfsd_create_serv(struct net *net)
 {
        int error;
@@ -1011,6 +1016,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
                *statp = rpc_garbage_args;
                return 1;
        }
+       rqstp->rq_lease_breaker = NULL;
        /*
         * Give the xdr decoder a chance to change this if it wants
         * (necessary in the NFSv4.0 compound case)
index 68d3f30..3b40853 100644 (file)
@@ -64,13 +64,6 @@ typedef struct {
        refcount_t              sc_count;
 } copy_stateid_t;
 
-#define STATEID_FMT    "(%08x/%08x/%08x/%08x)"
-#define STATEID_VAL(s) \
-       (s)->si_opaque.so_clid.cl_boot, \
-       (s)->si_opaque.so_clid.cl_id, \
-       (s)->si_opaque.so_id, \
-       (s)->si_generation
-
 struct nfsd4_callback {
        struct nfs4_client *cb_clp;
        struct rpc_message cb_msg;
index 78c5742..1861db1 100644 (file)
@@ -277,6 +277,7 @@ DECLARE_EVENT_CLASS(nfsd_stateid_class,
 DEFINE_EVENT(nfsd_stateid_class, nfsd_##name, \
        TP_PROTO(stateid_t *stp), \
        TP_ARGS(stp))
+
 DEFINE_STATEID_EVENT(layoutstate_alloc);
 DEFINE_STATEID_EVENT(layoutstate_unhash);
 DEFINE_STATEID_EVENT(layoutstate_free);
@@ -288,6 +289,138 @@ DEFINE_STATEID_EVENT(layout_recall_done);
 DEFINE_STATEID_EVENT(layout_recall_fail);
 DEFINE_STATEID_EVENT(layout_recall_release);
 
+DEFINE_STATEID_EVENT(deleg_open);
+DEFINE_STATEID_EVENT(deleg_none);
+DEFINE_STATEID_EVENT(deleg_break);
+DEFINE_STATEID_EVENT(deleg_recall);
+
+DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
+       TP_PROTO(u32 seqid, const stateid_t *stp),
+       TP_ARGS(seqid, stp),
+       TP_STRUCT__entry(
+               __field(u32, seqid)
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __field(u32, si_id)
+               __field(u32, si_generation)
+       ),
+       TP_fast_assign(
+               __entry->seqid = seqid;
+               __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+               __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+               __entry->si_id = stp->si_opaque.so_id;
+               __entry->si_generation = stp->si_generation;
+       ),
+       TP_printk("seqid=%u client %08x:%08x stateid %08x:%08x",
+               __entry->seqid, __entry->cl_boot, __entry->cl_id,
+               __entry->si_id, __entry->si_generation)
+)
+
+#define DEFINE_STATESEQID_EVENT(name) \
+DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
+       TP_PROTO(u32 seqid, const stateid_t *stp), \
+       TP_ARGS(seqid, stp))
+
+DEFINE_STATESEQID_EVENT(preprocess);
+DEFINE_STATESEQID_EVENT(open_confirm);
+
+DECLARE_EVENT_CLASS(nfsd_clientid_class,
+       TP_PROTO(const clientid_t *clid),
+       TP_ARGS(clid),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clid->cl_boot;
+               __entry->cl_id = clid->cl_id;
+       ),
+       TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+#define DEFINE_CLIENTID_EVENT(name) \
+DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \
+       TP_PROTO(const clientid_t *clid), \
+       TP_ARGS(clid))
+
+DEFINE_CLIENTID_EVENT(expired);
+DEFINE_CLIENTID_EVENT(purged);
+DEFINE_CLIENTID_EVENT(renew);
+DEFINE_CLIENTID_EVENT(stale);
+
+DECLARE_EVENT_CLASS(nfsd_net_class,
+       TP_PROTO(const struct nfsd_net *nn),
+       TP_ARGS(nn),
+       TP_STRUCT__entry(
+               __field(unsigned long long, boot_time)
+       ),
+       TP_fast_assign(
+               __entry->boot_time = nn->boot_time;
+       ),
+       TP_printk("boot_time=%16llx", __entry->boot_time)
+)
+
+#define DEFINE_NET_EVENT(name) \
+DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
+       TP_PROTO(const struct nfsd_net *nn), \
+       TP_ARGS(nn))
+
+DEFINE_NET_EVENT(grace_start);
+DEFINE_NET_EVENT(grace_complete);
+
+DECLARE_EVENT_CLASS(nfsd_clid_class,
+       TP_PROTO(const struct nfsd_net *nn,
+                unsigned int namelen,
+                const unsigned char *namedata),
+       TP_ARGS(nn, namelen, namedata),
+       TP_STRUCT__entry(
+               __field(unsigned long long, boot_time)
+               __field(unsigned int, namelen)
+               __dynamic_array(unsigned char,  name, namelen)
+       ),
+       TP_fast_assign(
+               __entry->boot_time = nn->boot_time;
+               __entry->namelen = namelen;
+               memcpy(__get_dynamic_array(name), namedata, namelen);
+       ),
+       TP_printk("boot_time=%16llx nfs4_clientid=%.*s",
+               __entry->boot_time, __entry->namelen, __get_str(name))
+)
+
+#define DEFINE_CLID_EVENT(name) \
+DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
+       TP_PROTO(const struct nfsd_net *nn, \
+                unsigned int namelen, \
+                const unsigned char *namedata), \
+       TP_ARGS(nn, namelen, namedata))
+
+DEFINE_CLID_EVENT(find);
+DEFINE_CLID_EVENT(reclaim);
+
+TRACE_EVENT(nfsd_clid_inuse_err,
+       TP_PROTO(const struct nfs4_client *clp),
+       TP_ARGS(clp),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+               __field(unsigned int, namelen)
+               __dynamic_array(unsigned char, name, clp->cl_name.len)
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               memcpy(__entry->addr, &clp->cl_addr,
+                       sizeof(struct sockaddr_in6));
+               __entry->namelen = clp->cl_name.len;
+               memcpy(__get_dynamic_array(name), clp->cl_name.data,
+                       clp->cl_name.len);
+       ),
+       TP_printk("nfs4_clientid %.*s already in use by %pISpc, client %08x:%08x",
+               __entry->namelen, __get_str(name), __entry->addr,
+               __entry->cl_boot, __entry->cl_id)
+)
+
 TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
 TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
 TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
@@ -432,6 +565,218 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
                        __entry->nlink, __entry->mode, __entry->mask)
 );
 
+#include "cache.h"
+
+TRACE_DEFINE_ENUM(RC_DROPIT);
+TRACE_DEFINE_ENUM(RC_REPLY);
+TRACE_DEFINE_ENUM(RC_DOIT);
+
+#define show_drc_retval(x)                                             \
+       __print_symbolic(x,                                             \
+               { RC_DROPIT, "DROPIT" },                                \
+               { RC_REPLY, "REPLY" },                                  \
+               { RC_DOIT, "DOIT" })
+
+TRACE_EVENT(nfsd_drc_found,
+       TP_PROTO(
+               const struct nfsd_net *nn,
+               const struct svc_rqst *rqstp,
+               int result
+       ),
+       TP_ARGS(nn, rqstp, result),
+       TP_STRUCT__entry(
+               __field(unsigned long long, boot_time)
+               __field(unsigned long, result)
+               __field(u32, xid)
+       ),
+       TP_fast_assign(
+               __entry->boot_time = nn->boot_time;
+               __entry->result = result;
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
+       ),
+       TP_printk("boot_time=%16llx xid=0x%08x result=%s",
+               __entry->boot_time, __entry->xid,
+               show_drc_retval(__entry->result))
+
+);
+
+TRACE_EVENT(nfsd_drc_mismatch,
+       TP_PROTO(
+               const struct nfsd_net *nn,
+               const struct svc_cacherep *key,
+               const struct svc_cacherep *rp
+       ),
+       TP_ARGS(nn, key, rp),
+       TP_STRUCT__entry(
+               __field(unsigned long long, boot_time)
+               __field(u32, xid)
+               __field(u32, cached)
+               __field(u32, ingress)
+       ),
+       TP_fast_assign(
+               __entry->boot_time = nn->boot_time;
+               __entry->xid = be32_to_cpu(key->c_key.k_xid);
+               __entry->cached = (__force u32)key->c_key.k_csum;
+               __entry->ingress = (__force u32)rp->c_key.k_csum;
+       ),
+       TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
+               __entry->boot_time, __entry->xid, __entry->cached,
+               __entry->ingress)
+);
+
+TRACE_EVENT(nfsd_cb_args,
+       TP_PROTO(
+               const struct nfs4_client *clp,
+               const struct nfs4_cb_conn *conn
+       ),
+       TP_ARGS(clp, conn),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __field(u32, prog)
+               __field(u32, ident)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               __entry->prog = conn->cb_prog;
+               __entry->ident = conn->cb_ident;
+               memcpy(__entry->addr, &conn->cb_addr,
+                       sizeof(struct sockaddr_in6));
+       ),
+       TP_printk("client %08x:%08x callback addr=%pISpc prog=%u ident=%u",
+               __entry->cl_boot, __entry->cl_id,
+               __entry->addr, __entry->prog, __entry->ident)
+);
+
+TRACE_EVENT(nfsd_cb_nodelegs,
+       TP_PROTO(const struct nfs4_client *clp),
+       TP_ARGS(clp),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+       ),
+       TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+TRACE_DEFINE_ENUM(NFSD4_CB_UP);
+TRACE_DEFINE_ENUM(NFSD4_CB_UNKNOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_DOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_FAULT);
+
+#define show_cb_state(val)                                             \
+       __print_symbolic(val,                                           \
+               { NFSD4_CB_UP,          "UP" },                         \
+               { NFSD4_CB_UNKNOWN,     "UNKNOWN" },                    \
+               { NFSD4_CB_DOWN,        "DOWN" },                       \
+               { NFSD4_CB_FAULT,       "FAULT"})
+
+DECLARE_EVENT_CLASS(nfsd_cb_class,
+       TP_PROTO(const struct nfs4_client *clp),
+       TP_ARGS(clp),
+       TP_STRUCT__entry(
+               __field(unsigned long, state)
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+       TP_fast_assign(
+               __entry->state = clp->cl_cb_state;
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+                       sizeof(struct sockaddr_in6));
+       ),
+       TP_printk("addr=%pISpc client %08x:%08x state=%s",
+               __entry->addr, __entry->cl_boot, __entry->cl_id,
+               show_cb_state(__entry->state))
+);
+
+#define DEFINE_NFSD_CB_EVENT(name)                     \
+DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name,            \
+       TP_PROTO(const struct nfs4_client *clp),        \
+       TP_ARGS(clp))
+
+DEFINE_NFSD_CB_EVENT(setup);
+DEFINE_NFSD_CB_EVENT(state);
+DEFINE_NFSD_CB_EVENT(shutdown);
+
+TRACE_EVENT(nfsd_cb_setup_err,
+       TP_PROTO(
+               const struct nfs4_client *clp,
+               long error
+       ),
+       TP_ARGS(clp, error),
+       TP_STRUCT__entry(
+               __field(long, error)
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+       TP_fast_assign(
+               __entry->error = error;
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+                       sizeof(struct sockaddr_in6));
+       ),
+       TP_printk("addr=%pISpc client %08x:%08x error=%ld",
+               __entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error)
+);
+
+TRACE_EVENT(nfsd_cb_work,
+       TP_PROTO(
+               const struct nfs4_client *clp,
+               const char *procedure
+       ),
+       TP_ARGS(clp, procedure),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __string(procedure, procedure)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               __assign_str(procedure, procedure)
+               memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+                       sizeof(struct sockaddr_in6));
+       ),
+       TP_printk("addr=%pISpc client %08x:%08x procedure=%s",
+               __entry->addr, __entry->cl_boot, __entry->cl_id,
+               __get_str(procedure))
+);
+
+TRACE_EVENT(nfsd_cb_done,
+       TP_PROTO(
+               const struct nfs4_client *clp,
+               int status
+       ),
+       TP_ARGS(clp, status),
+       TP_STRUCT__entry(
+               __field(u32, cl_boot)
+               __field(u32, cl_id)
+               __field(int, status)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+       TP_fast_assign(
+               __entry->cl_boot = clp->cl_clientid.cl_boot;
+               __entry->cl_id = clp->cl_clientid.cl_id;
+               __entry->status = status;
+               memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+                       sizeof(struct sockaddr_in6));
+       ),
+       TP_printk("addr=%pISpc client %08x:%08x status=%d",
+               __entry->addr, __entry->cl_boot, __entry->cl_id,
+               __entry->status)
+);
+
 #endif /* _NFSD_TRACE_H */
 
 #undef TRACE_INCLUDE_PATH
index 5b405f3..42c5128 100644 (file)
@@ -565,6 +565,10 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
        if (!table->proc_handler)
                goto out;
 
+       /* don't even try if the size is too large */
+       if (count > KMALLOC_MAX_SIZE)
+               return -ENOMEM;
+
        if (write) {
                kbuf = memdup_user_nul(ubuf, count);
                if (IS_ERR(kbuf)) {
index ffebed1..5e444d4 100644 (file)
@@ -264,11 +264,13 @@ static void proc_kill_sb(struct super_block *sb)
 {
        struct proc_fs_info *fs_info = proc_sb_info(sb);
 
-       if (fs_info->proc_self)
-               dput(fs_info->proc_self);
+       if (!fs_info) {
+               kill_anon_super(sb);
+               return;
+       }
 
-       if (fs_info->proc_thread_self)
-               dput(fs_info->proc_thread_self);
+       dput(fs_info->proc_self);
+       dput(fs_info->proc_thread_self);
 
        kill_anon_super(sb);
        put_pid_ns(fs_info->pid_ns);
index 11d0285..7aef495 100644 (file)
@@ -766,22 +766,38 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
  * which has a pointer to the sigset_t itself followed by a size_t containing
  * the sigset size.
  */
+struct sigset_argpack {
+       sigset_t __user *p;
+       size_t size;
+};
+
+static inline int get_sigset_argpack(struct sigset_argpack *to,
+                                    struct sigset_argpack __user *from)
+{
+       // the path is hot enough for overhead of copy_from_user() to matter
+       if (from) {
+               if (!user_read_access_begin(from, sizeof(*from)))
+                       return -EFAULT;
+               unsafe_get_user(to->p, &from->p, Efault);
+               unsafe_get_user(to->size, &from->size, Efault);
+               user_read_access_end();
+       }
+       return 0;
+Efault:
+       user_access_end();
+       return -EFAULT;
+}
+
 SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
                void __user *, sig)
 {
-       size_t sigsetsize = 0;
-       sigset_t __user *up = NULL;
-
-       if (sig) {
-               if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
-                   || __get_user(up, (sigset_t __user * __user *)sig)
-                   || __get_user(sigsetsize,
-                               (size_t __user *)(sig+sizeof(void *))))
-                       return -EFAULT;
-       }
+       struct sigset_argpack x = {NULL, 0};
+
+       if (get_sigset_argpack(&x, sig))
+               return -EFAULT;
 
-       return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
+       return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
 }
 
 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
@@ -790,18 +806,12 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *,
                fd_set __user *, exp, struct old_timespec32 __user *, tsp,
                void __user *, sig)
 {
-       size_t sigsetsize = 0;
-       sigset_t __user *up = NULL;
-
-       if (sig) {
-               if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
-                   || __get_user(up, (sigset_t __user * __user *)sig)
-                   || __get_user(sigsetsize,
-                               (size_t __user *)(sig+sizeof(void *))))
-                       return -EFAULT;
-       }
+       struct sigset_argpack x = {NULL, 0};
+
+       if (get_sigset_argpack(&x, sig))
+               return -EFAULT;
 
-       return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
+       return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
 }
 
 #endif
@@ -1325,24 +1335,37 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
        return poll_select_finish(&end_time, tsp, type, ret);
 }
 
+struct compat_sigset_argpack {
+       compat_uptr_t p;
+       compat_size_t size;
+};
+static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
+                                           struct compat_sigset_argpack __user *from)
+{
+       if (from) {
+               if (!user_read_access_begin(from, sizeof(*from)))
+                       return -EFAULT;
+               unsafe_get_user(to->p, &from->p, Efault);
+               unsafe_get_user(to->size, &from->size, Efault);
+               user_read_access_end();
+       }
+       return 0;
+Efault:
+       user_access_end();
+       return -EFAULT;
+}
+
 COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
        compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
        struct __kernel_timespec __user *, tsp, void __user *, sig)
 {
-       compat_size_t sigsetsize = 0;
-       compat_uptr_t up = 0;
-
-       if (sig) {
-               if (!access_ok(sig,
-                               sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
-                               __get_user(up, (compat_uptr_t __user *)sig) ||
-                               __get_user(sigsetsize,
-                               (compat_size_t __user *)(sig+sizeof(up))))
-                       return -EFAULT;
-       }
+       struct compat_sigset_argpack x = {0, 0};
+
+       if (get_compat_sigset_argpack(&x, sig))
+               return -EFAULT;
 
-       return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
-                                sigsetsize, PT_TIMESPEC);
+       return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+                                x.size, PT_TIMESPEC);
 }
 
 #if defined(CONFIG_COMPAT_32BIT_TIME)
@@ -1351,20 +1374,13 @@ COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
        compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
        struct old_timespec32 __user *, tsp, void __user *, sig)
 {
-       compat_size_t sigsetsize = 0;
-       compat_uptr_t up = 0;
-
-       if (sig) {
-               if (!access_ok(sig,
-                               sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
-                       __get_user(up, (compat_uptr_t __user *)sig) ||
-                       __get_user(sigsetsize,
-                               (compat_size_t __user *)(sig+sizeof(up))))
-                       return -EFAULT;
-       }
+       struct compat_sigset_argpack x = {0, 0};
+
+       if (get_compat_sigset_argpack(&x, sig))
+               return -EFAULT;
 
-       return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
-                                sigsetsize, PT_OLD_TIMESPEC);
+       return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+                                x.size, PT_OLD_TIMESPEC);
 }
 
 #endif
index bf3b768..904459b 100644 (file)
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
  */
 void deactivate_super(struct super_block *s)
 {
-        if (!atomic_add_unless(&s->s_active, -1, 1)) {
+       if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
index a40f88c..a190212 100644 (file)
@@ -1236,64 +1236,26 @@ xfs_ioctl_setattr_xflags(
        return 0;
 }
 
-/*
- * If we are changing DAX flags, we have to ensure the file is clean and any
- * cached objects in the address space are invalidated and removed. This
- * requires us to lock out other IO and page faults similar to a truncate
- * operation. The locks need to be held until the transaction has been committed
- * so that the cache invalidation is atomic with respect to the DAX flag
- * manipulation.
- */
-static int
-xfs_ioctl_setattr_dax_invalidate(
+static void
+xfs_ioctl_setattr_prepare_dax(
        struct xfs_inode        *ip,
-       struct fsxattr          *fa,
-       int                     *join_flags)
+       struct fsxattr          *fa)
 {
-       struct inode            *inode = VFS_I(ip);
-       struct super_block      *sb = inode->i_sb;
-       int                     error;
-
-       *join_flags = 0;
-
-       /*
-        * It is only valid to set the DAX flag on regular files and
-        * directories on filesystems where the block size is equal to the page
-        * size. On directories it serves as an inherited hint so we don't
-        * have to check the device for dax support or flush pagecache.
-        */
-       if (fa->fsx_xflags & FS_XFLAG_DAX) {
-               struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
-
-               if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
-                       return -EINVAL;
-       }
-
-       /* If the DAX state is not changing, we have nothing to do here. */
-       if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
-               return 0;
-       if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
-               return 0;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct inode            *inode = VFS_I(ip);
 
        if (S_ISDIR(inode->i_mode))
-               return 0;
-
-       /* lock, flush and invalidate mapping in preparation for flag change */
-       xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
-       error = filemap_write_and_wait(inode->i_mapping);
-       if (error)
-               goto out_unlock;
-       error = invalidate_inode_pages2(inode->i_mapping);
-       if (error)
-               goto out_unlock;
-
-       *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
-       return 0;
+               return;
 
-out_unlock:
-       xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
-       return error;
+       if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
+           (mp->m_flags & XFS_MOUNT_DAX_NEVER))
+               return;
 
+       if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
+           !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
+           (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
+            (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
+               d_mark_dontcache(inode);
 }
 
 /*
@@ -1301,17 +1263,10 @@ out_unlock:
  * have permission to do so. On success, return a clean transaction and the
  * inode locked exclusively ready for further operation specific checks. On
  * failure, return an error without modifying or locking the inode.
- *
- * The inode might already be IO locked on call. If this is the case, it is
- * indicated in @join_flags and we take full responsibility for ensuring they
- * are unlocked from now on. Hence if we have an error here, we still have to
- * unlock them. Otherwise, once they are joined to the transaction, they will
- * be unlocked on commit/cancel.
  */
 static struct xfs_trans *
 xfs_ioctl_setattr_get_trans(
-       struct xfs_inode        *ip,
-       int                     join_flags)
+       struct xfs_inode        *ip)
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
@@ -1328,8 +1283,7 @@ xfs_ioctl_setattr_get_trans(
                goto out_unlock;
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
-       join_flags = 0;
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
        /*
         * CAP_FOWNER overrides the following restrictions:
@@ -1350,8 +1304,6 @@ xfs_ioctl_setattr_get_trans(
 out_cancel:
        xfs_trans_cancel(tp);
 out_unlock:
-       if (join_flags)
-               xfs_iunlock(ip, join_flags);
        return ERR_PTR(error);
 }
 
@@ -1476,7 +1428,6 @@ xfs_ioctl_setattr(
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_dquot        *olddquot = NULL;
        int                     code;
-       int                     join_flags = 0;
 
        trace_xfs_ioctl_setattr(ip);
 
@@ -1500,18 +1451,9 @@ xfs_ioctl_setattr(
                        return code;
        }
 
-       /*
-        * Changing DAX config may require inode locking for mapping
-        * invalidation. These need to be held all the way to transaction commit
-        * or cancel time, so need to be passed through to
-        * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
-        * appropriately.
-        */
-       code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
-       if (code)
-               goto error_free_dquots;
+       xfs_ioctl_setattr_prepare_dax(ip, fa);
 
-       tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+       tp = xfs_ioctl_setattr_get_trans(ip);
        if (IS_ERR(tp)) {
                code = PTR_ERR(tp);
                goto error_free_dquots;
@@ -1639,7 +1581,6 @@ xfs_ioc_setxflags(
        struct fsxattr          fa;
        struct fsxattr          old_fa;
        unsigned int            flags;
-       int                     join_flags = 0;
        int                     error;
 
        if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1656,18 +1597,9 @@ xfs_ioc_setxflags(
        if (error)
                return error;
 
-       /*
-        * Changing DAX config may require inode locking for mapping
-        * invalidation. These need to be held all the way to transaction commit
-        * or cancel time, so need to be passed through to
-        * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
-        * appropriately.
-        */
-       error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
-       if (error)
-               goto out_drop_write;
+       xfs_ioctl_setattr_prepare_dax(ip, &fa);
 
-       tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+       tp = xfs_ioctl_setattr_get_trans(ip);
        if (IS_ERR(tp)) {
                error = PTR_ERR(tp);
                goto out_drop_write;
index 1dc8d26..459d698 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20200430
+#define ACPI_CA_VERSION                 0x20200528
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
index 4defed5..aa236b9 100644 (file)
@@ -815,8 +815,9 @@ typedef u8 acpi_adr_space_type;
 #define ACPI_ADR_SPACE_GPIO             (acpi_adr_space_type) 8
 #define ACPI_ADR_SPACE_GSBUS            (acpi_adr_space_type) 9
 #define ACPI_ADR_SPACE_PLATFORM_COMM    (acpi_adr_space_type) 10
+#define ACPI_ADR_SPACE_PLATFORM_RT      (acpi_adr_space_type) 11
 
-#define ACPI_NUM_PREDEFINED_REGIONS     11
+#define ACPI_NUM_PREDEFINED_REGIONS     12
 
 /*
  * Special Address Spaces
index 85b28eb..2eacaf7 100644 (file)
@@ -128,10 +128,10 @@ do {                                                                      \
 #ifndef __smp_load_acquire
 #define __smp_load_acquire(p)                                          \
 ({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
+       __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);               \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
-       ___p1;                                                          \
+       (typeof(*p))___p1;                                              \
 })
 #endif
 
@@ -183,10 +183,10 @@ do {                                                                      \
 #ifndef smp_load_acquire
 #define smp_load_acquire(p)                                            \
 ({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
+       __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ___p1;                                                          \
+       (typeof(*p))___p1;                                              \
 })
 #endif
 
@@ -229,14 +229,14 @@ do {                                                                      \
 #ifndef smp_cond_load_relaxed
 #define smp_cond_load_relaxed(ptr, cond_expr) ({               \
        typeof(ptr) __PTR = (ptr);                              \
-       typeof(*ptr) VAL;                                       \
+       __unqual_scalar_typeof(*ptr) VAL;                       \
        for (;;) {                                              \
                VAL = READ_ONCE(*__PTR);                        \
                if (cond_expr)                                  \
                        break;                                  \
                cpu_relax();                                    \
        }                                                       \
-       VAL;                                                    \
+       (typeof(*ptr))VAL;                                      \
 })
 #endif
 
@@ -250,10 +250,10 @@ do {                                                                      \
  */
 #ifndef smp_cond_load_acquire
 #define smp_cond_load_acquire(ptr, cond_expr) ({               \
-       typeof(*ptr) _val;                                      \
+       __unqual_scalar_typeof(*ptr) _val;                      \
        _val = smp_cond_load_relaxed(ptr, cond_expr);           \
        smp_acquire__after_ctrl_dep();                          \
-       _val;                                                   \
+       (typeof(*ptr))_val;                                     \
 })
 #endif
 
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
new file mode 100644 (file)
index 0000000..4c23eef
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MAILBOX_IPCC_H
+#define __DT_BINDINGS_MAILBOX_IPCC_H
+
+/* Signal IDs for MPROC protocol */
+#define IPCC_MPROC_SIGNAL_GLINK_QMP    0
+#define IPCC_MPROC_SIGNAL_SMP2P                2
+#define IPCC_MPROC_SIGNAL_PING         3
+
+/* Client IDs */
+#define IPCC_CLIENT_AOP                        0
+#define IPCC_CLIENT_TZ                 1
+#define IPCC_CLIENT_MPSS               2
+#define IPCC_CLIENT_LPASS              3
+#define IPCC_CLIENT_SLPI               4
+#define IPCC_CLIENT_SDC                        5
+#define IPCC_CLIENT_CDSP               6
+#define IPCC_CLIENT_NPU                        7
+#define IPCC_CLIENT_APSS               8
+#define IPCC_CLIENT_GPU                        9
+#define IPCC_CLIENT_CVP                        10
+#define IPCC_CLIENT_CAM                        11
+#define IPCC_CLIENT_VPU                        12
+#define IPCC_CLIENT_PCIE0              13
+#define IPCC_CLIENT_PCIE1              14
+#define IPCC_CLIENT_PCIE2              15
+#define IPCC_CLIENT_SPSS               16
+
+#endif
index aa765af..85fdce8 100644 (file)
@@ -33,6 +33,7 @@
  * @cache:      log-based polynomial representation buffer
  * @elp:        error locator polynomial
  * @poly_2t:    temporary polynomials of degree 2t
+ * @swap_bits:  swap bits within data and syndrome bytes
  */
 struct bch_control {
        unsigned int    m;
@@ -51,16 +52,18 @@ struct bch_control {
        int            *cache;
        struct gf_poly *elp;
        struct gf_poly *poly_2t[4];
+       bool            swap_bits;
 };
 
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+                            bool swap_bits);
 
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
 
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
                unsigned int len, uint8_t *ecc);
 
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
               const uint8_t *recv_ecc, const uint8_t *calc_ecc,
               const unsigned int *syn, unsigned int *errloc);
 
index d7ee4c6..e2f7252 100644 (file)
@@ -10,7 +10,8 @@
                     + __GNUC_MINOR__ * 100     \
                     + __GNUC_PATCHLEVEL__)
 
-#if GCC_VERSION < 40600
+/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
+#if GCC_VERSION < 40800
 # error Sorry, your compiler is too old - please upgrade it.
 #endif
 
 #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
-#if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
-#endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
 #if GCC_VERSION >= 70000
index 6325d64..33d3a2e 100644 (file)
@@ -230,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE                                               \
-({                                                                     \
-       switch (size) {                                                 \
-       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
-       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
-       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
-       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
-       default:                                                        \
-               barrier();                                              \
-               __builtin_memcpy((void *)res, (const void *)p, size);   \
-               barrier();                                              \
-       }                                                               \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
-       __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
-       __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
-       switch (size) {
-       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
-       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
-       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-       default:
-               barrier();
-               __builtin_memcpy((void *)p, (const void *)res, size);
-               barrier();
-       }
-}
-
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
@@ -293,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * statements.
  *
  * These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * unions.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
@@ -309,23 +251,69 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 #include <asm/barrier.h>
 #include <linux/kasan-checks.h>
 
-#define __READ_ONCE(x, check)                                          \
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity or dependency ordering guarantees. Note that this may result
+ * in tears!
+ */
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+
+#define __READ_ONCE_SCALAR(x)                                          \
 ({                                                                     \
-       union { typeof(x) __val; char __c[1]; } __u;                    \
-       if (check)                                                      \
-               __read_once_size(&(x), __u.__c, sizeof(x));             \
-       else                                                            \
-               __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
-       smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
-       __u.__val;                                                      \
+       __unqual_scalar_typeof(x) __x = __READ_ONCE(x);                 \
+       smp_read_barrier_depends();                                     \
+       (typeof(x))__x;                                                 \
 })
-#define READ_ONCE(x) __READ_ONCE(x, 1)
 
+#define READ_ONCE(x)                                                   \
+({                                                                     \
+       compiletime_assert_rwonce_type(x);                              \
+       __READ_ONCE_SCALAR(x);                                          \
+})
+
+#define __WRITE_ONCE(x, val)                           \
+do {                                                   \
+       *(volatile typeof(x) *)&(x) = (val);            \
+} while (0)
+
+#define WRITE_ONCE(x, val)                             \
+do {                                                   \
+       compiletime_assert_rwonce_type(x);              \
+       __WRITE_ONCE(x, val);                           \
+} while (0)
+
+#ifdef CONFIG_KASAN
 /*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  */
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+       return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN. This is usually
+ * used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x)                                           \
+({                                                                     \
+       unsigned long __x;                                              \
+       compiletime_assert(sizeof(x) == sizeof(__x),                    \
+               "Unsupported access size for READ_ONCE_NOCHECK().");    \
+       __x = __read_once_word_nocheck(&(x));                           \
+       smp_read_barrier_depends();                                     \
+       (typeof(x))__x;                                                 \
+})
 
 static __no_kasan_or_inline
 unsigned long read_word_at_a_time(const void *addr)
@@ -334,14 +322,6 @@ unsigned long read_word_at_a_time(const void *addr)
        return *(unsigned long *)addr;
 }
 
-#define WRITE_ONCE(x, val) \
-({                                                     \
-       union { typeof(x) __val; char __c[1]; } __u =   \
-               { .__val = (__force typeof(x)) (val) }; \
-       __write_once_size(&(x), __u.__c, sizeof(x));    \
-       __u.__val;                                      \
-})
-
 #endif /* __KERNEL__ */
 
 /*
@@ -406,6 +386,16 @@ static inline void *offset_to_ptr(const int *off)
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
 
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t)                                      \
+       compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),  \
+               "Unsupported access size for {READ,WRITE}_ONCE().")
+
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
index 6fcf732..d4e1956 100644 (file)
@@ -218,6 +218,53 @@ struct ftrace_likely_data {
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ *                            non-scalar types unchanged.
+ */
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
+/*
+ * We build this out of a couple of helper macros in a vain attempt to
+ * help you keep your lunch down while reading it.
+ */
+#define __pick_scalar_type(x, type, otherwise)                                 \
+       __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
+
+/*
+ * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
+ * so we include the naked type here as well as the signed/unsigned variants.
+ */
+#define __pick_integer_type(x, type, otherwise)                                        \
+       __pick_scalar_type(x, type,                                             \
+               __pick_scalar_type(x, unsigned type,                            \
+                       __pick_scalar_type(x, signed type, otherwise)))
+
+#define __unqual_scalar_typeof(x) typeof(                                      \
+       __pick_integer_type(x, char,                                            \
+               __pick_integer_type(x, short,                                   \
+                       __pick_integer_type(x, int,                             \
+                               __pick_integer_type(x, long,                    \
+                                       __pick_integer_type(x, long long, x))))))
+#else
+/*
+ * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type)                              \
+               unsigned type:  (unsigned type)0,                       \
+               signed type:    (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof(                              \
+               _Generic((x),                                           \
+                        char:  (char)0,                                \
+                        __scalar_type_to_expr_cases(char),             \
+                        __scalar_type_to_expr_cases(short),            \
+                        __scalar_type_to_expr_cases(int),              \
+                        __scalar_type_to_expr_cases(long),             \
+                        __scalar_type_to_expr_cases(long long),        \
+                        default: (x)))
+#endif
+
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \
        (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
index 67d5950..3494f67 100644 (file)
@@ -367,7 +367,7 @@ struct cpufreq_driver {
 
        /* platform specific boost support code */
        bool            boost_enabled;
-       int             (*set_boost)(int state);
+       int             (*set_boost)(struct cpufreq_policy *policy, int state);
 };
 
 /* flags */
index 8377afe..191772d 100644 (file)
@@ -102,6 +102,7 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+       CPUHP_AP_IRQ_RISCV_STARTING,
        CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
        CPUHP_AP_MICROCODE_LOADER,
index 19ef6c8..8e1f8f9 100644 (file)
@@ -1048,6 +1048,7 @@ struct lock_manager_operations {
        bool (*lm_break)(struct file_lock *);
        int (*lm_change)(struct file_lock *, int, struct list_head *);
        void (*lm_setup)(struct file_lock *, void **);
+       bool (*lm_breaker_owns_lease)(struct file_lock *);
 };
 
 struct lock_manager {
index dfbbf7a..e339dac 100644 (file)
@@ -342,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
 
 extern int stack_tracer_enabled;
 
-int stack_trace_sysctl(struct ctl_table *table, int write,
-                      void __user *buffer, size_t *lenp,
-                      loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+                      size_t *lenp, loff_t *ppos);
 
 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
 DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644 (file)
index 3614a13..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
-       int vout_gpio;
-       bool wakeup;
-       int (*hw_setup)(struct i2c_client *client);
-       int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
index 9e409bb..3b8580b 100644 (file)
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
 bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+       input_mt_report_slot_state(dev, 0, false);
+}
+
 void input_mt_report_finger_count(struct input_dev *dev, int count);
 void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
 void input_mt_drop_unused(struct input_dev *dev);
index d8c2904..3a63d98 100644 (file)
@@ -35,6 +35,7 @@ int icc_enable(struct icc_path *path);
 int icc_disable(struct icc_path *path);
 int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
 void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
 
 #else
 
@@ -84,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag)
 {
 }
 
+static inline const char *icc_get_name(struct icc_path *path)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_INTERCONNECT */
 
 #endif /* __LINUX_INTERCONNECT_H */
index 12258ea..65b81e0 100644 (file)
@@ -59,6 +59,7 @@ bool kthread_should_stop(void);
 bool kthread_should_park(void);
 bool __kthread_should_park(struct task_struct *k);
 bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
 void *kthread_data(struct task_struct *k);
 void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
index fee7fab..3755158 100644 (file)
@@ -318,6 +318,7 @@ extern void try_offline_node(int nid);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern int remove_memory(int nid, u64 start, u64 size);
 extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_and_remove_memory(int nid, u64 start, u64 size);
 
 #else
 static inline void try_offline_node(int nid) {}
index 886e304..d890805 100644 (file)
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
 
 /*
  * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning
  * in nand_chip.bbt_options.
  */
 #define NAND_BBT_DYNAMICSTRUCT 0x80000000
index c98a211..fd1ecb8 100644 (file)
@@ -138,7 +138,7 @@ struct cfi_ident {
        uint16_t InterfaceDesc;
        uint16_t MaxBufWriteSize;
        uint8_t  NumEraseRegions;
-       uint32_t EraseRegionInfo[0]; /* Not host ordered */
+       uint32_t EraseRegionInfo[]; /* Not host ordered */
 } __packed;
 
 /* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
        uint16_t ProtRegAddr;
        uint8_t  FactProtRegSize;
        uint8_t  UserProtRegSize;
-       uint8_t  extra[0];
+       uint8_t  extra[];
 } __packed;
 
 struct cfi_intelext_otpinfo {
@@ -286,7 +286,7 @@ struct cfi_private {
        map_word sector_erase_cmd;
        unsigned long chipshift; /* Because they're of the same type */
        const char *im_name;     /* inter_module name for cmdset_setup */
-       struct flchip chips[0];  /* per-chip data structure for each chip */
+       struct flchip chips[];  /* per-chip data structure for each chip */
 };
 
 uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
index 2d1f4a6..157357e 100644 (file)
@@ -200,6 +200,8 @@ struct mtd_debug_info {
  *
  * @node: list node used to add an MTD partition to the parent partition list
  * @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ *       MTD_SLC_ON_MLC_EMULATION is set
  * @flags: original flags (before the mtdpart logic decided to tweak them based
  *        on flash constraints, like eraseblock/pagesize alignment)
  *
@@ -209,6 +211,7 @@ struct mtd_debug_info {
 struct mtd_part {
        struct list_head node;
        u64 offset;
+       u64 size;
        u32 flags;
 };
 
@@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
 
 static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
 {
-       return mtd->erasesize / mtd->writesize;
+       struct mtd_info *master = mtd_get_master(mtd);
+
+       return master->erasesize / mtd->writesize;
 }
 
 static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
index e545c05..b74a539 100644 (file)
@@ -37,6 +37,7 @@
  *     master MTD flag set for the corresponding MTD partition.
  *     For example, to force a read-only partition, simply adding
  *     MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
  *
  * Note: writeable partitions require their size and offset be
  * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
        uint64_t size;                  /* partition size */
        uint64_t offset;                /* offset within the master MTD space */
        uint32_t mask_flags;            /* master MTD flags to mask out for this partition */
+       uint32_t add_flags;             /* flags to add to the partition */
        struct device_node *of_node;
 };
 
index df5b9fd..2e3f437 100644 (file)
@@ -24,7 +24,7 @@ struct lpddr_private {
        struct qinfo_chip *qinfo;
        int numchips;
        unsigned long chipshift;
-       struct flchip chips[0];
+       struct flchip chips[];
 };
 
 /* qinfo_query_info structure contains request information for
index 1e76196..65b1c1c 100644 (file)
@@ -83,14 +83,14 @@ struct nand_chip;
 /*
  * Constants for ECC_MODES
  */
-typedef enum {
+enum nand_ecc_mode {
+       NAND_ECC_INVALID,
        NAND_ECC_NONE,
        NAND_ECC_SOFT,
        NAND_ECC_HW,
        NAND_ECC_HW_SYNDROME,
-       NAND_ECC_HW_OOB_FIRST,
        NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
+};
 
 enum nand_ecc_algo {
        NAND_ECC_UNKNOWN,
@@ -118,86 +118,74 @@ enum nand_ecc_algo {
 #define NAND_ECC_GENERIC_ERASED_CHECK  BIT(0)
 #define NAND_ECC_MAXIMIZE              BIT(1)
 
+/*
+ * Option constants for bizarre disfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16       BIT(1)
+
 /*
  * When using software implementation of Hamming, we can specify which byte
  * ordering should be used.
  */
 #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
 
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
- */
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16       0x00000002
 /* Chip has cache program function */
-#define NAND_CACHEPRG          0x00000008
+#define NAND_CACHEPRG          BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
 /*
  * Chip requires ready check on read (for auto-incremented sequential read).
  * True only for small page devices; large page devices do not support
  * autoincrement.
  */
-#define NAND_NEED_READRDY      0x00000100
+#define NAND_NEED_READRDY      BIT(8)
 
 /* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE  0x00000200
+#define NAND_NO_SUBPAGE_WRITE  BIT(9)
 
 /* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD         0x00000400
+#define NAND_BROKEN_XD         BIT(10)
 
 /* Device behaves just like nand, but is readonly */
-#define NAND_ROM               0x00000800
+#define NAND_ROM               BIT(11)
 
 /* Device supports subpage reads */
-#define NAND_SUBPAGE_READ      0x00001000
+#define NAND_SUBPAGE_READ      BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
 
 /*
  * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
  * patterns.
  */
-#define NAND_NEED_SCRAMBLING   0x00002000
+#define NAND_NEED_SCRAMBLING   BIT(13)
 
 /* Device needs 3rd row address cycle */
-#define NAND_ROW_ADDR_3                0x00004000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
-
-/* Macros to identify the above */
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-
-/*
- * There are different places where the manufacturer stores the factory bad
- * block markers.
- *
- * Position within the block: Each of these pages needs to be checked for a
- * bad block marking pattern.
- */
-#define NAND_BBM_FIRSTPAGE             0x01000000
-#define NAND_BBM_SECONDPAGE            0x02000000
-#define NAND_BBM_LASTPAGE              0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL             5
-#define NAND_BBM_POS_LARGE             0
+#define NAND_ROW_ADDR_3                BIT(14)
 
 /* Non chip related options */
 /* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN      0x00010000
+#define NAND_SKIP_BBTSCAN      BIT(16)
 /* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
 /*
  * Autodetect nand buswidth with readid/onfi.
  * This suppose the driver will configure the hardware in 8 bits mode
  * when calling nand_scan_ident, and update its configuration
  * before calling nand_scan_tail.
  */
-#define NAND_BUSWIDTH_AUTO      0x00080000
+#define NAND_BUSWIDTH_AUTO      BIT(19)
+
 /*
  * This option could be defined by controller drivers to protect against
  * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
  */
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA          BIT(20)
 
 /*
  * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +195,49 @@ enum nand_ecc_algo {
  * If your controller already takes care of this delay, you don't need to set
  * this flag.
  */
-#define NAND_WAIT_TCCS         0x00200000
+#define NAND_WAIT_TCCS         BIT(21)
 
 /*
  * Whether the NAND chip is a boot medium. Drivers might use this information
  * to select ECC algorithms supported by the boot ROM or similar restrictions.
  */
-#define NAND_IS_BOOT_MEDIUM    0x00400000
+#define NAND_IS_BOOT_MEDIUM    BIT(22)
 
 /*
  * Do not try to tweak the timings at runtime. This is needed when the
  * controller initializes the timings on itself or when it relies on
  * configuration done by the bootloader.
  */
-#define NAND_KEEP_TIMINGS      0x00800000
+#define NAND_KEEP_TIMINGS      BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE     BIT(24)
+#define NAND_BBM_SECONDPAGE    BIT(25)
+#define NAND_BBM_LASTPAGE      BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK      BIT(27)
 
 /* Cell info constants */
 #define NAND_CI_CHIPNR_MSK     0x03
 #define NAND_CI_CELLTYPE_MSK   0x0C
 #define NAND_CI_CELLTYPE_SHIFT 2
 
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL             5
+#define NAND_BBM_POS_LARGE             0
+
 /**
  * struct nand_parameters - NAND generic parameters from the parameter page
  * @model: Model name
@@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = {                        \
  * @write_oob: function to write chip OOB data
  */
 struct nand_ecc_ctrl {
-       nand_ecc_modes_t mode;
+       enum nand_ecc_mode mode;
        enum nand_ecc_algo algo;
        int steps;
        int size;
@@ -491,13 +502,17 @@ enum nand_data_interface_type {
 /**
  * struct nand_data_interface - NAND interface timing
  * @type:       type of the timing
- * @timings:    The timing, type according to @type
+ * @timings:    The timing information
+ * @timings.mode: Timing mode as defined in the specification
  * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
  */
 struct nand_data_interface {
        enum nand_data_interface_type type;
-       union {
-               struct nand_sdr_timings sdr;
+       struct nand_timings {
+               unsigned int mode;
+               union {
+                       struct nand_sdr_timings sdr;
+               };
        } timings;
 };
 
@@ -694,6 +709,7 @@ struct nand_op_instr {
 
 /**
  * struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
  * @instrs: array of instructions
  * @ninstrs: length of the @instrs array
  * @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +725,7 @@ struct nand_op_instr {
  * controller driver.
  */
 struct nand_subop {
+       unsigned int cs;
        const struct nand_op_instr *instrs;
        unsigned int ninstrs;
        unsigned int first_instr_start_off;
@@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
 int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
                                  u8 *subfeature_param);
 
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
                       int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+                                 int oob_required, int page);
 
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
                        int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+                                  int oob_required, int page);
 
 /* Reset and initialize a NAND device */
 int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
                                unsigned int offset_in_page, const void *buf,
                                unsigned int len, bool force_8bit);
 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
-                     bool force_8bit);
+                     bool force_8bit, bool check_only);
 int nand_write_data_op(struct nand_chip *chip, const void *buf,
                       unsigned int len, bool force_8bit);
 
@@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip);
  * sucessful nand_scan().
  */
 void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
 
 /*
  * External helper for controller drivers that have to implement the WAITRDY
@@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
 void nand_select_target(struct nand_chip *chip, unsigned int cs);
 void nand_deselect_target(struct nand_chip *chip);
 
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+                      unsigned int src_off, unsigned int nbits);
+
 /**
  * nand_get_data_buf() - Get the internal page buffer
  * @chip: NAND chip object
index 1e2af0e..60bac2c 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 /* Flash opcodes. */
+#define SPINOR_OP_WRDI         0x04    /* Write disable */
 #define SPINOR_OP_WREN         0x06    /* Write enable */
 #define SPINOR_OP_RDSR         0x05    /* Read status register */
 #define SPINOR_OP_WRSR         0x01    /* Write status register 1 byte */
@@ -80,7 +81,6 @@
 
 /* Used for SST flashes only. */
 #define SPINOR_OP_BP           0x02    /* Byte program */
-#define SPINOR_OP_WRDI         0x04    /* Write disable */
 #define SPINOR_OP_AAI_WP       0xad    /* Auto address increment word program */
 
 /* Used for S3AN flashes only */
@@ -302,7 +302,7 @@ struct spi_nor;
  * @read:              read data from the SPI NOR.
  * @write:             write data to the SPI NOR.
  * @erase:             erase a sector of the SPI NOR at the offset @offs; if
- *                     not provided by the driver, spi-nor will send the erase
+ *                     not provided by the driver, SPI NOR will send the erase
  *                     opcode via write_reg().
  */
 struct spi_nor_controller_ops {
@@ -327,16 +327,16 @@ struct spi_nor_manufacturer;
 struct spi_nor_flash_parameter;
 
 /**
- * struct spi_nor - Structure for defining the SPI NOR layer
- * @mtd:               point to a mtd_info structure
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd:               an mtd_info structure
  * @lock:              the lock for the read/write/erase/lock/unlock operations
- * @dev:               point to a spi device, or a spi nor controller device.
- * @spimem:            point to the spi mem device
+ * @dev:               pointer to an SPI device or an SPI NOR controller device
+ * @spimem:            pointer to the SPI memory device
  * @bouncebuf:         bounce buffer used when the buffer passed by the MTD
  *                      layer is not DMA-able
  * @bouncebuf_size:    size of the bounce buffer
- * @info:              spi-nor part JDEC MFR id and other info
- * @manufacturer:      spi-nor manufacturer
+ * @info:              SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer:      SPI NOR manufacturer
  * @page_size:         the page size of the SPI NOR
  * @addr_width:                number of address bytes
  * @erase_opcode:      the opcode for erasing a sector
@@ -344,17 +344,17 @@ struct spi_nor_flash_parameter;
  * @read_dummy:                the dummy needed by the read operation
  * @program_opcode:    the program opcode
  * @sst_write_second:  used by the SST write operation
- * @flags:             flag options for the current SPI-NOR (SNOR_F_*)
+ * @flags:             flag options for the current SPI NOR (SNOR_F_*)
  * @read_proto:                the SPI protocol for read operations
  * @write_proto:       the SPI protocol for write operations
- * @reg_proto          the SPI protocol for read_reg/write_reg/erase operations
+ * @reg_proto:         the SPI protocol for read_reg/write_reg/erase operations
  * @controller_ops:    SPI NOR controller driver specific operations.
- * @params:            [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * @params:            [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
  *                      The structure includes legacy flash parameters and
  *                      settings that can be overwritten by the spi_nor_fixups
  *                      hooks, or dynamically when parsing the SFDP tables.
  * @dirmap:            pointers to struct spi_mem_dirmap_desc for reads/writes.
- * @priv:              the private data
+ * @priv:              pointer to the private data
  */
 struct spi_nor {
        struct mtd_info         mtd;
index 82d8fb4..4dba3c9 100644 (file)
@@ -38,7 +38,7 @@ struct nfs4_ace {
 
 struct nfs4_acl {
        uint32_t        naces;
-       struct nfs4_ace aces[0];
+       struct nfs4_ace aces[];
 };
 
 #define NFS4_MAXLABELLEN       2048
@@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err)
        case NFS4ERR_NOFILEHANDLE:
        case NFS4ERR_MOVED:
                return false;
-       };
+       }
        return true;
 }
 
index 73eda45..6ee9119 100644 (file)
@@ -230,6 +230,7 @@ struct nfs4_copy_state {
 #define NFS_INO_INVALID_OTHER  BIT(12)         /* other attrs are invalid */
 #define NFS_INO_DATA_INVAL_DEFER       \
                                BIT(13)         /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14)         /* cached blocks are invalid */
 
 #define NFS_INO_INVALID_ATTR   (NFS_INO_INVALID_CHANGE \
                | NFS_INO_INVALID_CTIME \
index e5f3e7d..5fd0a9e 100644 (file)
@@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 {
 
 struct nfs4_secinfo_flavors {
        unsigned int            num_flavors;
-       struct nfs4_secinfo4    flavors[0];
+       struct nfs4_secinfo4    flavors[];
 };
 
 struct nfs4_secinfo_arg {
index 222f6f7..6be1aa5 100644 (file)
@@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy)
  * not onlined when onlining the section).
  * The content of these pages is effectively stale. Such pages should not
  * be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow offlining of unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require to re-set the pages PageOffline() and not giving them to the
+ * buddy via online_page_callback_t.
  */
 PAGE_TYPE_OPS(Offline, offline)
 
index 08e639e..03e92c7 100644 (file)
@@ -68,7 +68,7 @@ struct davinci_nand_pdata {           /* platform_data */
         * Newer ones also support 4-bit ECC, but are awkward
         * using it with large page chips.
         */
-       nand_ecc_modes_t        ecc_mode;
+       enum nand_ecc_mode      ecc_mode;
        u8                      ecc_bits;
 
        /* e.g. NAND_BUSWIDTH_16 */
index deb849b..08675b1 100644 (file)
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
 
        unsigned int    ignore_unset_ecc:1;
 
-       nand_ecc_modes_t        ecc_mode;
+       enum nand_ecc_mode      ecc_mode;
 
        int                     nr_sets;
        struct s3c2410_nand_set *sets;
index 7478618..d5c4a32 100644 (file)
@@ -41,6 +41,18 @@ struct dev_pm_opp_supply {
        unsigned long u_amp;
 };
 
+/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg:       Average bandwidth corresponding to this OPP (in icc units)
+ * @peak:      Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+       u32 avg;
+       u32 peak;
+};
+
 /**
  * struct dev_pm_opp_info - OPP freq/voltage/current values
  * @rate:      Target clk rate in hz
@@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
 int of_get_required_opp_performance_state(struct device_node *np, int index);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
 void dev_pm_opp_of_register_em(struct cpumask *cpus);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
@@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
 {
        return -ENOTSUPP;
 }
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+       return -ENOTSUPP;
+}
 #endif
 
 #endif         /* __LINUX_OPP_H__ */
index bf02437..46d6ae6 100644 (file)
@@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos,
                if (*kbuf) {
                        memset(*kbuf, 0, copy);
                        *kbuf += copy;
-               } else if (__clear_user(*ubuf, copy))
+               } else if (clear_user(*ubuf, copy))
                        return -EFAULT;
                else
                        *ubuf += copy;
index 4f6b284..98da816 100644 (file)
@@ -76,7 +76,7 @@ struct rpc_auth {
        unsigned int            au_verfsize;    /* size of reply verifier */
        unsigned int            au_ralign;      /* words before UL header */
 
-       unsigned int            au_flags;
+       unsigned long           au_flags;
        const struct rpc_authops *au_ops;
        rpc_authflavor_t        au_flavor;      /* pseudoflavor (note may
                                                 * differ from the flavor in
@@ -89,7 +89,8 @@ struct rpc_auth {
 };
 
 /* rpc_auth au_flags */
-#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+#define RPCAUTH_AUTH_DATATOUCH         (1)
+#define RPCAUTH_AUTH_UPDATE_SLACK      (2)
 
 struct rpc_auth_create_args {
        rpc_authflavor_t pseudoflavor;
index bc07e51..bf4ac8a 100644 (file)
@@ -84,6 +84,7 @@ struct pf_desc {
        u32     service;
        char    *name;
        char    *auth_domain_name;
+       struct auth_domain *domain;
        bool    datatouch;
 };
 
index fd39089..386628b 100644 (file)
@@ -254,6 +254,7 @@ struct svc_rqst {
        struct page *           *rq_page_end;  /* one past the last page */
 
        struct kvec             rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+       struct bio_vec          rq_bvec[RPCSVC_MAXPAGES];
 
        __be32                  rq_xid;         /* transmission id */
        u32                     rq_prog;        /* program number */
@@ -299,6 +300,7 @@ struct svc_rqst {
        struct net              *rq_bc_net;     /* pointer to backchannel's
                                                 * net namespace
                                                 */
+       void **                 rq_lease_breaker; /* The v4 client breaking a lease */
 };
 
 #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
index cbcfbd0..7ed8262 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/sunrpc/rpc_rdma.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
 
 /* Default and maximum inline threshold sizes */
 enum {
@@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt {
 };
 
 /* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
-                                   __be32 *rdma_resp,
-                                   struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+                                    struct svc_rdma_recv_ctxt *rctxt);
 
 /* svc_rdma_recvfrom.c */
 extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
index 9e1e046..aca35ab 100644 (file)
@@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
        return 0;
 }
 
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+       return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+               (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
+
 int    svc_reg_xprt_class(struct svc_xprt_class *);
 void   svc_unreg_xprt_class(struct svc_xprt_class *);
 void   svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
index ca39a38..f09c82b 100644 (file)
@@ -20,7 +20,8 @@ int gss_svc_init(void);
 void gss_svc_shutdown(void);
 int gss_svc_init_net(struct net *net);
 void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+                                                     char *name);
 u32 svcauth_gss_flavor(struct auth_domain *dom);
 
 #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
index 771baad..b7ac7fe 100644 (file)
@@ -28,7 +28,7 @@ struct svc_sock {
 
        /* private TCP part */
        /* On-the-wire fragment header: */
-       __be32                  sk_reclen;
+       __be32                  sk_marker;
        /* As we receive a record, this includes the length received so
         * far (including the fragment header): */
        u32                     sk_tcplen;
@@ -41,12 +41,12 @@ struct svc_sock {
 
 static inline u32 svc_sock_reclen(struct svc_sock *svsk)
 {
-       return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+       return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
 }
 
 static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
 {
-       return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+       return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
 }
 
 /*
index 5453af8..239db79 100644 (file)
@@ -17,6 +17,16 @@ struct vdpa_callback {
        void *private;
 };
 
+/**
+ * vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+       resource_size_t addr;
+       resource_size_t size;
+};
+
 /**
  * vDPA device - representation of a vDPA device
  * @dev: underlying device
@@ -73,6 +83,10 @@ struct vdpa_device {
  *                             @vdev: vdpa device
  *                             @idx: virtqueue index
  *                             Returns virtqueue state (last_avail_idx)
+ * @get_vq_notification:       Get the notification area for a virtqueue
+ *                             @vdev: vdpa device
+ *                             @idx: virtqueue index
+ *                             Returns the notification area
  * @get_vq_align:              Get the virtqueue align requirement
  *                             for the device
  *                             @vdev: vdpa device
@@ -162,6 +176,8 @@ struct vdpa_config_ops {
        bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
        int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
        u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+       struct vdpa_notification_area
+       (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
 
        /* Device ops */
        u32 (*get_vq_align)(struct vdpa_device *vdev);
index 9e2763d..59bd50f 100644 (file)
@@ -105,9 +105,9 @@ struct vringh_kiov {
 /* Helpers for userspace vrings. */
 int vringh_init_user(struct vringh *vrh, u64 features,
                     unsigned int num, bool weak_barriers,
-                    struct vring_desc __user *desc,
-                    struct vring_avail __user *avail,
-                    struct vring_used __user *used);
+                    vring_desc_t __user *desc,
+                    vring_avail_t __user *avail,
+                    vring_used_t __user *used);
 
 static inline void vringh_iov_init(struct vringh_iov *iov,
                                   struct iovec *iovec, unsigned num)
index e147677..81ee175 100644 (file)
@@ -392,13 +392,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *);
 void neigh_seq_stop(struct seq_file *, void *);
 
 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
-                       void __user *buffer, size_t *lenp, loff_t *ppos);
+                       void *buffer, size_t *lenp, loff_t *ppos);
 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
-                               void __user *buffer,
+                               void *buffer,
                                size_t *lenp, loff_t *ppos);
 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
-                                  void __user *buffer,
-                                  size_t *lenp, loff_t *ppos);
+                                  void *buffer, size_t *lenp, loff_t *ppos);
 
 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          proc_handler *proc_handler);
index 32d88c4..b9b51a4 100644 (file)
  ** GSS-API related trace events
  **/
 
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_NONE);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_INTEGRITY);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_PRIVACY);
+
+#define show_gss_service(x)                                            \
+       __print_symbolic(x,                                             \
+               { RPC_GSS_SVC_NONE,             "none" },               \
+               { RPC_GSS_SVC_INTEGRITY,        "integrity" },          \
+               { RPC_GSS_SVC_PRIVACY,          "privacy" })
+
 TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
 TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
 TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
@@ -126,6 +136,40 @@ DEFINE_GSSAPI_EVENT(verify_mic);
 DEFINE_GSSAPI_EVENT(wrap);
 DEFINE_GSSAPI_EVENT(unwrap);
 
+DECLARE_EVENT_CLASS(rpcgss_ctx_class,
+       TP_PROTO(
+               const struct gss_cred *gc
+       ),
+
+       TP_ARGS(gc),
+
+       TP_STRUCT__entry(
+               __field(const void *, cred)
+               __field(unsigned long, service)
+               __string(principal, gc->gc_principal)
+       ),
+
+       TP_fast_assign(
+               __entry->cred = gc;
+               __entry->service = gc->gc_service;
+               __assign_str(principal, gc->gc_principal)
+       ),
+
+       TP_printk("cred=%p service=%s principal='%s'",
+               __entry->cred, show_gss_service(__entry->service),
+               __get_str(principal))
+);
+
+#define DEFINE_CTX_EVENT(name)                                         \
+       DEFINE_EVENT(rpcgss_ctx_class, rpcgss_ctx_##name,               \
+                       TP_PROTO(                                       \
+                               const struct gss_cred *gc               \
+                       ),                                              \
+                       TP_ARGS(gc))
+
+DEFINE_CTX_EVENT(init);
+DEFINE_CTX_EVENT(destroy);
+
 TRACE_EVENT(rpcgss_svc_accept_upcall,
        TP_PROTO(
                __be32 xid,
@@ -291,6 +335,40 @@ TRACE_EVENT(rpcgss_need_reencode,
                __entry->ret ? "" : "un")
 );
 
+TRACE_EVENT(rpcgss_update_slack,
+       TP_PROTO(
+               const struct rpc_task *task,
+               const struct rpc_auth *auth
+       ),
+
+       TP_ARGS(task, auth),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, task_id)
+               __field(unsigned int, client_id)
+               __field(u32, xid)
+               __field(const void *, auth)
+               __field(unsigned int, rslack)
+               __field(unsigned int, ralign)
+               __field(unsigned int, verfsize)
+       ),
+
+       TP_fast_assign(
+               __entry->task_id = task->tk_pid;
+               __entry->client_id = task->tk_client->cl_clid;
+               __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+               __entry->auth = auth;
+               __entry->rslack = auth->au_rslack;
+               __entry->ralign = auth->au_ralign;
+               __entry->verfsize = auth->au_verfsize;
+       ),
+
+       TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+               __entry->task_id, __entry->client_id, __entry->xid,
+               __entry->auth, __entry->rslack, __entry->ralign,
+               __entry->verfsize)
+);
+
 DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
        TP_PROTO(
                __be32 xid,
@@ -371,6 +449,7 @@ TRACE_EVENT(rpcgss_upcall_result,
 
 TRACE_EVENT(rpcgss_context,
        TP_PROTO(
+               u32 window_size,
                unsigned long expiry,
                unsigned long now,
                unsigned int timeout,
@@ -378,12 +457,13 @@ TRACE_EVENT(rpcgss_context,
                const u8 *data
        ),
 
-       TP_ARGS(expiry, now, timeout, len, data),
+       TP_ARGS(window_size, expiry, now, timeout, len, data),
 
        TP_STRUCT__entry(
                __field(unsigned long, expiry)
                __field(unsigned long, now)
                __field(unsigned int, timeout)
+               __field(u32, window_size)
                __field(int, len)
                __string(acceptor, data)
        ),
@@ -392,13 +472,14 @@ TRACE_EVENT(rpcgss_context,
                __entry->expiry = expiry;
                __entry->now = now;
                __entry->timeout = timeout;
+               __entry->window_size = window_size;
                __entry->len = len;
                strncpy(__get_str(acceptor), data, len);
        ),
 
-       TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
-               __entry->expiry, __entry->now, __entry->timeout,
-               __entry->len, __get_str(acceptor))
+       TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+               __entry->window_size, __entry->expiry, __entry->now,
+               __entry->timeout, __entry->len, __get_str(acceptor))
 );
 
 
index 132c3c7..0f05a6e 100644 (file)
@@ -380,12 +380,8 @@ TRACE_EVENT(xprtrdma_inline_thresh,
 
 DEFINE_CONN_EVENT(connect);
 DEFINE_CONN_EVENT(disconnect);
-DEFINE_CONN_EVENT(flush_dct);
 
-DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
-DEFINE_RXPRT_EVENT(xprtrdma_op_close);
 DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
 
 TRACE_EVENT(xprtrdma_op_connect,
@@ -1279,38 +1275,42 @@ TRACE_EVENT(xprtrdma_leaked_rep,
  ** Server-side RPC/RDMA events
  **/
 
-DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+DECLARE_EVENT_CLASS(svcrdma_accept_class,
        TP_PROTO(
-               const struct svc_xprt *xprt
+               const struct svcxprt_rdma *rdma,
+               long status
        ),
 
-       TP_ARGS(xprt),
+       TP_ARGS(rdma, status),
 
        TP_STRUCT__entry(
-               __field(const void *, xprt)
-               __string(addr, xprt->xpt_remotebuf)
+               __field(long, status)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xprt = xprt;
-               __assign_str(addr, xprt->xpt_remotebuf);
+               __entry->status = status;
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s",
-               __entry->xprt, __get_str(addr)
+       TP_printk("addr=%s status=%ld",
+               __get_str(addr), __entry->status
        )
 );
 
-#define DEFINE_XPRT_EVENT(name)                                                \
-               DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,   \
-                               TP_PROTO(                               \
-                                       const struct svc_xprt *xprt     \
-                               ),                                      \
-                               TP_ARGS(xprt))
+#define DEFINE_ACCEPT_EVENT(name) \
+               DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
+                               TP_PROTO( \
+                                       const struct svcxprt_rdma *rdma, \
+                                       long status \
+                               ), \
+                               TP_ARGS(rdma, status))
 
-DEFINE_XPRT_EVENT(accept);
-DEFINE_XPRT_EVENT(fail);
-DEFINE_XPRT_EVENT(free);
+DEFINE_ACCEPT_EVENT(pd);
+DEFINE_ACCEPT_EVENT(qp);
+DEFINE_ACCEPT_EVENT(fabric);
+DEFINE_ACCEPT_EVENT(initdepth);
+DEFINE_ACCEPT_EVENT(accept);
 
 TRACE_DEFINE_ENUM(RDMA_MSG);
 TRACE_DEFINE_ENUM(RDMA_NOMSG);
@@ -1355,7 +1355,7 @@ TRACE_EVENT(svcrdma_decode_rqst,
                show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
 );
 
-TRACE_EVENT(svcrdma_decode_short,
+TRACE_EVENT(svcrdma_decode_short_err,
        TP_PROTO(
                unsigned int hdrlen
        ),
@@ -1399,7 +1399,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
 );
 
 #define DEFINE_BADREQ_EVENT(name)                                      \
-               DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+               DEFINE_EVENT(svcrdma_badreq_event,                      \
+                            svcrdma_decode_##name##_err,               \
                                TP_PROTO(                               \
                                        __be32 *p                       \
                                ),                                      \
@@ -1583,28 +1584,117 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
 DEFINE_SVC_DMA_EVENT(dma_map_page);
 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
 
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
        TP_PROTO(
                const struct svcxprt_rdma *rdma,
+               unsigned int nents,
                int status
        ),
 
-       TP_ARGS(rdma, status),
+       TP_ARGS(rdma, nents, status),
 
        TP_STRUCT__entry(
                __field(int, status)
+               __field(unsigned int, nents)
                __string(device, rdma->sc_cm_id->device->name)
                __string(addr, rdma->sc_xprt.xpt_remotebuf)
        ),
 
        TP_fast_assign(
                __entry->status = status;
+               __entry->nents = nents;
+               __assign_str(device, rdma->sc_cm_id->device->name);
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s device=%s nents=%u status=%d",
+               __get_str(addr), __get_str(device), __entry->nents,
+               __entry->status
+       )
+);
+
+TRACE_EVENT(svcrdma_no_rwctx_err,
+       TP_PROTO(
+               const struct svcxprt_rdma *rdma,
+               unsigned int num_sges
+       ),
+
+       TP_ARGS(rdma, num_sges),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, num_sges)
+               __string(device, rdma->sc_cm_id->device->name)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->num_sges = num_sges;
+               __assign_str(device, rdma->sc_cm_id->device->name);
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s device=%s num_sges=%d",
+               __get_str(addr), __get_str(device), __entry->num_sges
+       )
+);
+
+TRACE_EVENT(svcrdma_page_overrun_err,
+       TP_PROTO(
+               const struct svcxprt_rdma *rdma,
+               const struct svc_rqst *rqst,
+               unsigned int pageno
+       ),
+
+       TP_ARGS(rdma, rqst, pageno),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, pageno)
+               __field(u32, xid)
+               __string(device, rdma->sc_cm_id->device->name)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->pageno = pageno;
+               __entry->xid = __be32_to_cpu(rqst->rq_xid);
+               __assign_str(device, rdma->sc_cm_id->device->name);
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
+               __get_str(device), __entry->xid, __entry->pageno
+       )
+);
+
+TRACE_EVENT(svcrdma_small_wrch_err,
+       TP_PROTO(
+               const struct svcxprt_rdma *rdma,
+               unsigned int remaining,
+               unsigned int seg_no,
+               unsigned int num_segs
+       ),
+
+       TP_ARGS(rdma, remaining, seg_no, num_segs),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, remaining)
+               __field(unsigned int, seg_no)
+               __field(unsigned int, num_segs)
+               __string(device, rdma->sc_cm_id->device->name)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->remaining = remaining;
+               __entry->seg_no = seg_no;
+               __entry->num_segs = num_segs;
                __assign_str(device, rdma->sc_cm_id->device->name);
                __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
        ),
 
-       TP_printk("addr=%s device=%s status=%d",
-               __get_str(addr), __get_str(device), __entry->status
+       TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
+               __get_str(addr), __get_str(device), __entry->remaining,
+               __entry->seg_no, __entry->num_segs
        )
 );
 
index ffd2215..6a12935 100644 (file)
 #include <linux/net.h>
 #include <linux/tracepoint.h>
 
-DECLARE_EVENT_CLASS(xdr_buf_class,
+TRACE_DEFINE_ENUM(SOCK_STREAM);
+TRACE_DEFINE_ENUM(SOCK_DGRAM);
+TRACE_DEFINE_ENUM(SOCK_RAW);
+TRACE_DEFINE_ENUM(SOCK_RDM);
+TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
+TRACE_DEFINE_ENUM(SOCK_DCCP);
+TRACE_DEFINE_ENUM(SOCK_PACKET);
+
+#define show_socket_type(type)                                 \
+       __print_symbolic(type,                                  \
+               { SOCK_STREAM,          "STREAM" },             \
+               { SOCK_DGRAM,           "DGRAM" },              \
+               { SOCK_RAW,             "RAW" },                \
+               { SOCK_RDM,             "RDM" },                \
+               { SOCK_SEQPACKET,       "SEQPACKET" },          \
+               { SOCK_DCCP,            "DCCP" },               \
+               { SOCK_PACKET,          "PACKET" })
+
+/* This list is known to be incomplete, add new enums as needed. */
+TRACE_DEFINE_ENUM(AF_UNSPEC);
+TRACE_DEFINE_ENUM(AF_UNIX);
+TRACE_DEFINE_ENUM(AF_LOCAL);
+TRACE_DEFINE_ENUM(AF_INET);
+TRACE_DEFINE_ENUM(AF_INET6);
+
+#define rpc_show_address_family(family)                                \
+       __print_symbolic(family,                                \
+               { AF_UNSPEC,            "AF_UNSPEC" },          \
+               { AF_UNIX,              "AF_UNIX" },            \
+               { AF_LOCAL,             "AF_LOCAL" },           \
+               { AF_INET,              "AF_INET" },            \
+               { AF_INET6,             "AF_INET6" })
+
+DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
        TP_PROTO(
+               const struct rpc_task *task,
                const struct xdr_buf *xdr
        ),
 
-       TP_ARGS(xdr),
+       TP_ARGS(task, xdr),
 
        TP_STRUCT__entry(
+               __field(unsigned int, task_id)
+               __field(unsigned int, client_id)
                __field(const void *, head_base)
                __field(size_t, head_len)
                __field(const void *, tail_base)
@@ -31,6 +67,8 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
        ),
 
        TP_fast_assign(
+               __entry->task_id = task->tk_pid;
+               __entry->client_id = task->tk_client->cl_clid;
                __entry->head_base = xdr->head[0].iov_base;
                __entry->head_len = xdr->head[0].iov_len;
                __entry->tail_base = xdr->tail[0].iov_base;
@@ -39,23 +77,137 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
                __entry->msg_len = xdr->len;
        ),
 
-       TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+       TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+               __entry->task_id, __entry->client_id,
                __entry->head_base, __entry->head_len, __entry->page_len,
                __entry->tail_base, __entry->tail_len, __entry->msg_len
        )
 );
 
-#define DEFINE_XDRBUF_EVENT(name)                                      \
-               DEFINE_EVENT(xdr_buf_class, name,                       \
+#define DEFINE_RPCXDRBUF_EVENT(name)                                   \
+               DEFINE_EVENT(rpc_xdr_buf_class,                         \
+                               rpc_xdr_##name,                         \
                                TP_PROTO(                               \
+                                       const struct rpc_task *task,    \
                                        const struct xdr_buf *xdr       \
                                ),                                      \
-                               TP_ARGS(xdr))
+                               TP_ARGS(task, xdr))
+
+DEFINE_RPCXDRBUF_EVENT(sendto);
+DEFINE_RPCXDRBUF_EVENT(recvfrom);
+DEFINE_RPCXDRBUF_EVENT(reply_pages);
+
+
+DECLARE_EVENT_CLASS(rpc_clnt_class,
+       TP_PROTO(
+               const struct rpc_clnt *clnt
+       ),
+
+       TP_ARGS(clnt),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, client_id)
+       ),
+
+       TP_fast_assign(
+               __entry->client_id = clnt->cl_clid;
+       ),
+
+       TP_printk("clid=%u", __entry->client_id)
+);
+
+#define DEFINE_RPC_CLNT_EVENT(name)                                    \
+               DEFINE_EVENT(rpc_clnt_class,                            \
+                               rpc_clnt_##name,                        \
+                               TP_PROTO(                               \
+                                       const struct rpc_clnt *clnt     \
+                               ),                                      \
+                               TP_ARGS(clnt))
+
+DEFINE_RPC_CLNT_EVENT(free);
+DEFINE_RPC_CLNT_EVENT(killall);
+DEFINE_RPC_CLNT_EVENT(shutdown);
+DEFINE_RPC_CLNT_EVENT(release);
+DEFINE_RPC_CLNT_EVENT(replace_xprt);
+DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+
+TRACE_EVENT(rpc_clnt_new,
+       TP_PROTO(
+               const struct rpc_clnt *clnt,
+               const struct rpc_xprt *xprt,
+               const char *program,
+               const char *server
+       ),
+
+       TP_ARGS(clnt, xprt, program, server),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, client_id)
+               __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+               __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+               __string(program, program)
+               __string(server, server)
+       ),
+
+       TP_fast_assign(
+               __entry->client_id = clnt->cl_clid;
+               __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+               __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+               __assign_str(program, program);
+               __assign_str(server, server);
+       ),
+
+       TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
+               __entry->client_id, __get_str(addr), __get_str(port),
+               __get_str(program), __get_str(server))
+);
+
+TRACE_EVENT(rpc_clnt_new_err,
+       TP_PROTO(
+               const char *program,
+               const char *server,
+               int error
+       ),
+
+       TP_ARGS(program, server, error),
+
+       TP_STRUCT__entry(
+               __field(int, error)
+               __string(program, program)
+               __string(server, server)
+       ),
+
+       TP_fast_assign(
+               __entry->error = error;
+               __assign_str(program, program);
+               __assign_str(server, server);
+       ),
+
+       TP_printk("program=%s server=%s error=%d",
+               __get_str(program), __get_str(server), __entry->error)
+);
+
+TRACE_EVENT(rpc_clnt_clone_err,
+       TP_PROTO(
+               const struct rpc_clnt *clnt,
+               int error
+       ),
+
+       TP_ARGS(clnt, error),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, client_id)
+               __field(int, error)
+       ),
+
+       TP_fast_assign(
+               __entry->client_id = clnt->cl_clid;
+               __entry->error = error;
+       ),
+
+       TP_printk("client=%u error=%d", __entry->client_id, __entry->error)
+);
 
-DEFINE_XDRBUF_EVENT(xprt_sendto);
-DEFINE_XDRBUF_EVENT(xprt_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_sendto);
 
 TRACE_DEFINE_ENUM(RPC_AUTH_OK);
 TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
@@ -142,29 +294,35 @@ TRACE_EVENT(rpc_request,
 
 TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
 TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
+TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS);
 TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
 TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
 TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
+TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN);
 TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
 TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
 TRACE_DEFINE_ENUM(RPC_TASK_SENT);
 TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
 TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
 TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
+TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
 
 #define rpc_show_task_flags(flags)                                     \
        __print_flags(flags, "|",                                       \
                { RPC_TASK_ASYNC, "ASYNC" },                            \
                { RPC_TASK_SWAPPER, "SWAPPER" },                        \
+               { RPC_TASK_NULLCREDS, "NULLCREDS" },                    \
                { RPC_CALL_MAJORSEEN, "MAJORSEEN" },                    \
                { RPC_TASK_ROOTCREDS, "ROOTCREDS" },                    \
                { RPC_TASK_DYNAMIC, "DYNAMIC" },                        \
+               { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" },          \
                { RPC_TASK_SOFT, "SOFT" },                              \
                { RPC_TASK_SOFTCONN, "SOFTCONN" },                      \
                { RPC_TASK_SENT, "SENT" },                              \
                { RPC_TASK_TIMEOUT, "TIMEOUT" },                        \
                { RPC_TASK_NOCONNECT, "NOCONNECT" },                    \
-               { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
+               { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" },               \
+               { RPC_TASK_CRED_NOREF, "CRED_NOREF" })
 
 TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
 TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
@@ -359,6 +517,34 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
 DEFINE_RPC_REPLY_EVENT(bad_creds);
 DEFINE_RPC_REPLY_EVENT(auth_tooweak);
 
+TRACE_EVENT(rpc_call_rpcerror,
+       TP_PROTO(
+               const struct rpc_task *task,
+               int tk_status,
+               int rpc_status
+       ),
+
+       TP_ARGS(task, tk_status, rpc_status),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, task_id)
+               __field(unsigned int, client_id)
+               __field(int, tk_status)
+               __field(int, rpc_status)
+       ),
+
+       TP_fast_assign(
+               __entry->client_id = task->tk_client->cl_clid;
+               __entry->task_id = task->tk_pid;
+               __entry->tk_status = tk_status;
+               __entry->rpc_status = rpc_status;
+       ),
+
+       TP_printk("task:%u@%u tk_status=%d rpc_status=%d",
+               __entry->task_id, __entry->client_id,
+               __entry->tk_status, __entry->rpc_status)
+);
+
 TRACE_EVENT(rpc_stats_latency,
 
        TP_PROTO(
@@ -526,43 +712,6 @@ TRACE_EVENT(rpc_xdr_alignment,
        )
 );
 
-TRACE_EVENT(rpc_reply_pages,
-       TP_PROTO(
-               const struct rpc_rqst *req
-       ),
-
-       TP_ARGS(req),
-
-       TP_STRUCT__entry(
-               __field(unsigned int, task_id)
-               __field(unsigned int, client_id)
-               __field(const void *, head_base)
-               __field(size_t, head_len)
-               __field(const void *, tail_base)
-               __field(size_t, tail_len)
-               __field(unsigned int, page_len)
-       ),
-
-       TP_fast_assign(
-               __entry->task_id = req->rq_task->tk_pid;
-               __entry->client_id = req->rq_task->tk_client->cl_clid;
-
-               __entry->head_base = req->rq_rcv_buf.head[0].iov_base;
-               __entry->head_len = req->rq_rcv_buf.head[0].iov_len;
-               __entry->page_len = req->rq_rcv_buf.page_len;
-               __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
-               __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
-       ),
-
-       TP_printk(
-               "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
-               __entry->task_id, __entry->client_id,
-               __entry->head_base, __entry->head_len,
-               __entry->page_len,
-               __entry->tail_base, __entry->tail_len
-       )
-);
-
 /*
  * First define the enums in the below macros to be exported to userspace
  * via TRACE_DEFINE_ENUM().
@@ -575,9 +724,9 @@ TRACE_EVENT(rpc_reply_pages,
 #define RPC_SHOW_SOCKET                                \
        EM( SS_FREE, "FREE" )                   \
        EM( SS_UNCONNECTED, "UNCONNECTED" )     \
-       EM( SS_CONNECTING, "CONNECTING," )      \
-       EM( SS_CONNECTED, "CONNECTED," )        \
-       EMe(SS_DISCONNECTING, "DISCONNECTING" )
+       EM( SS_CONNECTING, "CONNECTING" )       \
+       EM( SS_CONNECTED, "CONNECTED" )         \
+       EMe( SS_DISCONNECTING, "DISCONNECTING" )
 
 #define rpc_show_socket_state(state) \
        __print_symbolic(state, RPC_SHOW_SOCKET)
@@ -719,6 +868,69 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
 
+TRACE_DEFINE_ENUM(XPRT_LOCKED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTING);
+TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT);
+TRACE_DEFINE_ENUM(XPRT_BOUND);
+TRACE_DEFINE_ENUM(XPRT_BINDING);
+TRACE_DEFINE_ENUM(XPRT_CLOSING);
+TRACE_DEFINE_ENUM(XPRT_CONGESTED);
+TRACE_DEFINE_ENUM(XPRT_CWND_WAIT);
+TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
+
+#define rpc_show_xprt_state(x)                                         \
+       __print_flags(x, "|",                                           \
+               { (1UL << XPRT_LOCKED),         "LOCKED"},              \
+               { (1UL << XPRT_CONNECTED),      "CONNECTED"},           \
+               { (1UL << XPRT_CONNECTING),     "CONNECTING"},          \
+               { (1UL << XPRT_CLOSE_WAIT),     "CLOSE_WAIT"},          \
+               { (1UL << XPRT_BOUND),          "BOUND"},               \
+               { (1UL << XPRT_BINDING),        "BINDING"},             \
+               { (1UL << XPRT_CLOSING),        "CLOSING"},             \
+               { (1UL << XPRT_CONGESTED),      "CONGESTED"},           \
+               { (1UL << XPRT_CWND_WAIT),      "CWND_WAIT"},           \
+               { (1UL << XPRT_WRITE_SPACE),    "WRITE_SPACE"})
+
+DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
+       TP_PROTO(
+               const struct rpc_xprt *xprt
+       ),
+
+       TP_ARGS(xprt),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, state)
+               __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+               __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+       ),
+
+       TP_fast_assign(
+               __entry->state = xprt->state;
+               __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+               __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+       ),
+
+       TP_printk("peer=[%s]:%s state=%s",
+               __get_str(addr), __get_str(port),
+               rpc_show_xprt_state(__entry->state))
+);
+
+#define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \
+       DEFINE_EVENT(rpc_xprt_lifetime_class, \
+                       xprt_##name, \
+                       TP_PROTO( \
+                               const struct rpc_xprt *xprt \
+                       ), \
+                       TP_ARGS(xprt))
+
+DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
+
 DECLARE_EVENT_CLASS(rpc_xprt_event,
        TP_PROTO(
                const struct rpc_xprt *xprt,
@@ -990,6 +1202,54 @@ TRACE_EVENT(xs_stream_read_request,
                        __entry->copied, __entry->reclen, __entry->offset)
 );
 
+
+DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+       TP_PROTO(
+               const struct svc_rqst *rqst,
+               const struct xdr_buf *xdr
+       ),
+
+       TP_ARGS(rqst, xdr),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(const void *, head_base)
+               __field(size_t, head_len)
+               __field(const void *, tail_base)
+               __field(size_t, tail_len)
+               __field(unsigned int, page_len)
+               __field(unsigned int, msg_len)
+       ),
+
+       TP_fast_assign(
+               __entry->xid = be32_to_cpu(rqst->rq_xid);
+               __entry->head_base = xdr->head[0].iov_base;
+               __entry->head_len = xdr->head[0].iov_len;
+               __entry->tail_base = xdr->tail[0].iov_base;
+               __entry->tail_len = xdr->tail[0].iov_len;
+               __entry->page_len = xdr->page_len;
+               __entry->msg_len = xdr->len;
+       ),
+
+       TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+               __entry->xid,
+               __entry->head_base, __entry->head_len, __entry->page_len,
+               __entry->tail_base, __entry->tail_len, __entry->msg_len
+       )
+);
+
+#define DEFINE_SVCXDRBUF_EVENT(name)                                   \
+               DEFINE_EVENT(svc_xdr_buf_class,                         \
+                               svc_xdr_##name,                         \
+                               TP_PROTO(                               \
+                                       const struct svc_rqst *rqst,    \
+                                       const struct xdr_buf *xdr       \
+                               ),                                      \
+                               TP_ARGS(rqst, xdr))
+
+DEFINE_SVCXDRBUF_EVENT(recvfrom);
+DEFINE_SVCXDRBUF_EVENT(sendto);
+
 #define show_rqstp_flags(flags)                                                \
        __print_flags(flags, "|",                                       \
                { (1UL << RQ_SECURE),           "RQ_SECURE"},           \
@@ -1024,6 +1284,17 @@ TRACE_EVENT(svc_recv,
                        show_rqstp_flags(__entry->flags))
 );
 
+TRACE_DEFINE_ENUM(SVC_GARBAGE);
+TRACE_DEFINE_ENUM(SVC_SYSERR);
+TRACE_DEFINE_ENUM(SVC_VALID);
+TRACE_DEFINE_ENUM(SVC_NEGATIVE);
+TRACE_DEFINE_ENUM(SVC_OK);
+TRACE_DEFINE_ENUM(SVC_DROP);
+TRACE_DEFINE_ENUM(SVC_CLOSE);
+TRACE_DEFINE_ENUM(SVC_DENIED);
+TRACE_DEFINE_ENUM(SVC_PENDING);
+TRACE_DEFINE_ENUM(SVC_COMPLETE);
+
 #define svc_show_status(status)                                \
        __print_symbolic(status,                        \
                { SVC_GARBAGE,  "SVC_GARBAGE" },        \
@@ -1167,28 +1438,54 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
                { (1UL << XPT_KILL_TEMP),       "XPT_KILL_TEMP"},       \
                { (1UL << XPT_CONG_CTRL),       "XPT_CONG_CTRL"})
 
+TRACE_EVENT(svc_xprt_create_err,
+       TP_PROTO(
+               const char *program,
+               const char *protocol,
+               struct sockaddr *sap,
+               const struct svc_xprt *xprt
+       ),
+
+       TP_ARGS(program, protocol, sap, xprt),
+
+       TP_STRUCT__entry(
+               __field(long, error)
+               __string(program, program)
+               __string(protocol, protocol)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+
+       TP_fast_assign(
+               __entry->error = PTR_ERR(xprt);
+               __assign_str(program, program);
+               __assign_str(protocol, protocol);
+               memcpy(__entry->addr, sap, sizeof(__entry->addr));
+       ),
+
+       TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
+               __entry->addr, __get_str(program), __get_str(protocol),
+               __entry->error)
+);
+
 TRACE_EVENT(svc_xprt_do_enqueue,
        TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
 
        TP_ARGS(xprt, rqst),
 
        TP_STRUCT__entry(
-               __field(struct svc_xprt *, xprt)
                __field(int, pid)
                __field(unsigned long, flags)
                __string(addr, xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xprt = xprt;
                __entry->pid = rqst? rqst->rq_task->pid : 0;
                __entry->flags = xprt->xpt_flags;
                __assign_str(addr, xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s pid=%d flags=%s",
-                       __entry->xprt, __get_str(addr),
-                       __entry->pid, show_svc_xprt_flags(__entry->flags))
+       TP_printk("addr=%s pid=%d flags=%s", __get_str(addr),
+               __entry->pid, show_svc_xprt_flags(__entry->flags))
 );
 
 DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -1197,25 +1494,55 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
        TP_ARGS(xprt),
 
        TP_STRUCT__entry(
-               __field(struct svc_xprt *, xprt)
                __field(unsigned long, flags)
                __string(addr, xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xprt = xprt;
                __entry->flags = xprt->xpt_flags;
                __assign_str(addr, xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s flags=%s",
-                       __entry->xprt, __get_str(addr),
-                       show_svc_xprt_flags(__entry->flags))
+       TP_printk("addr=%s flags=%s", __get_str(addr),
+               show_svc_xprt_flags(__entry->flags))
 );
 
-DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
-       TP_PROTO(struct svc_xprt *xprt),
-       TP_ARGS(xprt));
+#define DEFINE_SVC_XPRT_EVENT(name) \
+       DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
+                       TP_PROTO( \
+                               struct svc_xprt *xprt \
+                       ), \
+                       TP_ARGS(xprt))
+
+DEFINE_SVC_XPRT_EVENT(no_write_space);
+DEFINE_SVC_XPRT_EVENT(close);
+DEFINE_SVC_XPRT_EVENT(detach);
+DEFINE_SVC_XPRT_EVENT(free);
+
+TRACE_EVENT(svc_xprt_accept,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               const char *service
+       ),
+
+       TP_ARGS(xprt, service),
+
+       TP_STRUCT__entry(
+               __string(addr, xprt->xpt_remotebuf)
+               __string(protocol, xprt->xpt_class->xcl_name)
+               __string(service, service)
+       ),
+
+       TP_fast_assign(
+               __assign_str(addr, xprt->xpt_remotebuf);
+               __assign_str(protocol, xprt->xpt_class->xcl_name);
+               __assign_str(service, service);
+       ),
+
+       TP_printk("addr=%s protocol=%s service=%s",
+               __get_str(addr), __get_str(protocol), __get_str(service)
+       )
+);
 
 TRACE_EVENT(svc_xprt_dequeue,
        TP_PROTO(struct svc_rqst *rqst),
@@ -1223,24 +1550,20 @@ TRACE_EVENT(svc_xprt_dequeue,
        TP_ARGS(rqst),
 
        TP_STRUCT__entry(
-               __field(struct svc_xprt *, xprt)
                __field(unsigned long, flags)
                __field(unsigned long, wakeup)
                __string(addr, rqst->rq_xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xprt = rqst->rq_xprt;
                __entry->flags = rqst->rq_xprt->xpt_flags;
                __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
                                                        rqst->rq_qtime));
                __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
-                       __entry->xprt, __get_str(addr),
-                       show_svc_xprt_flags(__entry->flags),
-                       __entry->wakeup)
+       TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr),
+               show_svc_xprt_flags(__entry->flags), __entry->wakeup)
 );
 
 TRACE_EVENT(svc_wake_up,
@@ -1265,21 +1588,18 @@ TRACE_EVENT(svc_handle_xprt,
        TP_ARGS(xprt, len),
 
        TP_STRUCT__entry(
-               __field(struct svc_xprt *, xprt)
                __field(int, len)
                __field(unsigned long, flags)
                __string(addr, xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xprt = xprt;
                __entry->len = len;
                __entry->flags = xprt->xpt_flags;
                __assign_str(addr, xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s len=%d flags=%s",
-               __entry->xprt, __get_str(addr),
+       TP_printk("addr=%s len=%d flags=%s", __get_str(addr),
                __entry->len, show_svc_xprt_flags(__entry->flags))
 );
 
@@ -1313,27 +1633,221 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
        TP_ARGS(dr),
 
        TP_STRUCT__entry(
+               __field(const void *, dr)
                __field(u32, xid)
                __string(addr, dr->xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
+               __entry->dr = dr;
                __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
                                                       (dr->xprt_hlen>>2)));
                __assign_str(addr, dr->xprt->xpt_remotebuf);
        ),
 
-       TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+       TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+               __entry->xid)
 );
+
 #define DEFINE_SVC_DEFERRED_EVENT(name) \
-       DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+       DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \
                        TP_PROTO( \
                                const struct svc_deferred_req *dr \
                        ), \
                        TP_ARGS(dr))
 
 DEFINE_SVC_DEFERRED_EVENT(drop);
-DEFINE_SVC_DEFERRED_EVENT(revisit);
+DEFINE_SVC_DEFERRED_EVENT(queue);
+DEFINE_SVC_DEFERRED_EVENT(recv);
+
+TRACE_EVENT(svcsock_new_socket,
+       TP_PROTO(
+               const struct socket *socket
+       ),
+
+       TP_ARGS(socket),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, type)
+               __field(unsigned long, family)
+               __field(bool, listener)
+       ),
+
+       TP_fast_assign(
+               __entry->type = socket->type;
+               __entry->family = socket->sk->sk_family;
+               __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+       ),
+
+       TP_printk("type=%s family=%s%s",
+               show_socket_type(__entry->type),
+               rpc_show_address_family(__entry->family),
+               __entry->listener ? " (listener)" : ""
+       )
+);
+
+TRACE_EVENT(svcsock_marker,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               __be32 marker
+       ),
+
+       TP_ARGS(xprt, marker),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, length)
+               __field(bool, last)
+               __string(addr, xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK;
+               __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
+               __assign_str(addr, xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s length=%u%s", __get_str(addr),
+               __entry->length, __entry->last ? " (last)" : "")
+);
+
+DECLARE_EVENT_CLASS(svcsock_class,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               ssize_t result
+       ),
+
+       TP_ARGS(xprt, result),
+
+       TP_STRUCT__entry(
+               __field(ssize_t, result)
+               __field(unsigned long, flags)
+               __string(addr, xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->result = result;
+               __entry->flags = xprt->xpt_flags;
+               __assign_str(addr, xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s result=%zd flags=%s", __get_str(addr),
+               __entry->result, show_svc_xprt_flags(__entry->flags)
+       )
+);
+
+#define DEFINE_SVCSOCK_EVENT(name) \
+       DEFINE_EVENT(svcsock_class, svcsock_##name, \
+                       TP_PROTO( \
+                               const struct svc_xprt *xprt, \
+                               ssize_t result \
+                       ), \
+                       TP_ARGS(xprt, result))
+
+DEFINE_SVCSOCK_EVENT(udp_send);
+DEFINE_SVCSOCK_EVENT(udp_recv);
+DEFINE_SVCSOCK_EVENT(udp_recv_err);
+DEFINE_SVCSOCK_EVENT(tcp_send);
+DEFINE_SVCSOCK_EVENT(tcp_recv);
+DEFINE_SVCSOCK_EVENT(tcp_recv_eagain);
+DEFINE_SVCSOCK_EVENT(tcp_recv_err);
+DEFINE_SVCSOCK_EVENT(data_ready);
+DEFINE_SVCSOCK_EVENT(write_space);
+
+TRACE_EVENT(svcsock_tcp_recv_short,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               u32 expected,
+               u32 received
+       ),
+
+       TP_ARGS(xprt, expected, received),
+
+       TP_STRUCT__entry(
+               __field(u32, expected)
+               __field(u32, received)
+               __field(unsigned long, flags)
+               __string(addr, xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->expected = expected;
+               __entry->received = received;
+               __entry->flags = xprt->xpt_flags;
+               __assign_str(addr, xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s flags=%s expected=%u received=%u",
+               __get_str(addr), show_svc_xprt_flags(__entry->flags),
+               __entry->expected, __entry->received
+       )
+);
+
+TRACE_EVENT(svcsock_tcp_state,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               const struct socket *socket
+       ),
+
+       TP_ARGS(xprt, socket),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, socket_state)
+               __field(unsigned long, sock_state)
+               __field(unsigned long, flags)
+               __string(addr, xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->socket_state = socket->state;
+               __entry->sock_state = socket->sk->sk_state;
+               __entry->flags = xprt->xpt_flags;
+               __assign_str(addr, xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr),
+               rpc_show_socket_state(__entry->socket_state),
+               rpc_show_sock_state(__entry->sock_state),
+               show_svc_xprt_flags(__entry->flags)
+       )
+);
+
+DECLARE_EVENT_CLASS(svcsock_accept_class,
+       TP_PROTO(
+               const struct svc_xprt *xprt,
+               const char *service,
+               long status
+       ),
+
+       TP_ARGS(xprt, service, status),
+
+       TP_STRUCT__entry(
+               __field(long, status)
+               __string(service, service)
+               __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+       ),
+
+       TP_fast_assign(
+               __entry->status = status;
+               __assign_str(service, service);
+               memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr));
+       ),
+
+       TP_printk("listener=%pISpc service=%s status=%ld",
+               __entry->addr, __get_str(service), __entry->status
+       )
+);
+
+#define DEFINE_ACCEPT_EVENT(name) \
+       DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \
+                       TP_PROTO( \
+                               const struct svc_xprt *xprt, \
+                               const char *service, \
+                               long status \
+                       ), \
+                       TP_ARGS(xprt, service, status))
+
+DEFINE_ACCEPT_EVENT(accept);
+DEFINE_ACCEPT_EVENT(getpeername);
 
 DECLARE_EVENT_CLASS(cache_event,
        TP_PROTO(
@@ -1368,6 +1882,86 @@ DEFINE_CACHE_EVENT(cache_entry_update);
 DEFINE_CACHE_EVENT(cache_entry_make_negative);
 DEFINE_CACHE_EVENT(cache_entry_no_listener);
 
+DECLARE_EVENT_CLASS(register_class,
+       TP_PROTO(
+               const char *program,
+               const u32 version,
+               const int family,
+               const unsigned short protocol,
+               const unsigned short port,
+               int error
+       ),
+
+       TP_ARGS(program, version, family, protocol, port, error),
+
+       TP_STRUCT__entry(
+               __field(u32, version)
+               __field(unsigned long, family)
+               __field(unsigned short, protocol)
+               __field(unsigned short, port)
+               __field(int, error)
+               __string(program, program)
+       ),
+
+       TP_fast_assign(
+               __entry->version = version;
+               __entry->family = family;
+               __entry->protocol = protocol;
+               __entry->port = port;
+               __entry->error = error;
+               __assign_str(program, program);
+       ),
+
+       TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d",
+               __get_str(program), __entry->version,
+               __entry->protocol == IPPROTO_UDP ? "udp" : "tcp",
+               __entry->port, rpc_show_address_family(__entry->family),
+               __entry->error
+       )
+);
+
+#define DEFINE_REGISTER_EVENT(name) \
+       DEFINE_EVENT(register_class, svc_##name, \
+                       TP_PROTO( \
+                               const char *program, \
+                               const u32 version, \
+                               const int family, \
+                               const unsigned short protocol, \
+                               const unsigned short port, \
+                               int error \
+                       ), \
+                       TP_ARGS(program, version, family, protocol, \
+                               port, error))
+
+DEFINE_REGISTER_EVENT(register);
+DEFINE_REGISTER_EVENT(noregister);
+
+TRACE_EVENT(svc_unregister,
+       TP_PROTO(
+               const char *program,
+               const u32 version,
+               int error
+       ),
+
+       TP_ARGS(program, version, error),
+
+       TP_STRUCT__entry(
+               __field(u32, version)
+               __field(int, error)
+               __string(program, program)
+       ),
+
+       TP_fast_assign(
+               __entry->version = version;
+               __entry->error = error;
+               __assign_str(program, program);
+       ),
+
+       TP_printk("program=%sv%u error=%d",
+               __get_str(program), __entry->version, __entry->error
+       )
+);
+
 #endif /* _TRACE_SUNRPC_H */
 
 #include <trace/define_trace.h>
index 9fe72e4..0c23496 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define VHOST_FILE_UNBIND -1
+
 /* ioctls */
 
 #define VHOST_VIRTIO 0xAF
 /* Get the max ring size. */
 #define VHOST_VDPA_GET_VRING_NUM       _IOR(VHOST_VIRTIO, 0x76, __u16)
 
+/* Set event fd for config interrupt*/
+#define VHOST_VDPA_SET_CONFIG_CALL     _IOW(VHOST_VIRTIO, 0x77, int)
 #endif
index ecc27a1..b052355 100644 (file)
@@ -44,6 +44,7 @@
 #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
 #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
 #define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM          24 /* virtio mem */
 #define VIRTIO_ID_FS           26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM         27 /* virtio pmem */
 #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
new file mode 100644 (file)
index 0000000..a9ffe04
--- /dev/null
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio Mem Device
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ *     David Hildenbrand <david@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MEM_H
+#define _LINUX_VIRTIO_MEM_H
+
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/*
+ * Each virtio-mem device manages a dedicated region in physical address
+ * space. Each device can belong to a single NUMA node, multiple devices
+ * for a single NUMA node are possible. A virtio-mem device is like a
+ * "resizable DIMM" consisting of small memory blocks that can be plugged
+ * or unplugged. The device driver is responsible for (un)plugging memory
+ * blocks on demand.
+ *
+ * Virtio-mem devices can only operate on their assigned memory region in
+ * order to (un)plug memory. A device cannot (un)plug memory belonging to
+ * other devices.
+ *
+ * The "region_size" corresponds to the maximum amount of memory that can
+ * be provided by a device. The "size" corresponds to the amount of memory
+ * that is currently plugged. "requested_size" corresponds to a request
+ * from the device to the device driver to (un)plug blocks. The
+ * device driver should try to (un)plug blocks in order to reach the
+ * "requested_size". It is impossible to plug more memory than requested.
+ *
+ * The "usable_region_size" represents the memory region that can actually
+ * be used to (un)plug memory. It is always at least as big as the
+ * "requested_size" and will grow dynamically. It will only shrink when
+ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
+ *
+ * There are no guarantees what will happen if unplugged memory is
+ * read/written. Such memory should, in general, not be touched. E.g.,
+ * even writing might succeed, but the values will simply be discarded at
+ * random points in time.
+ *
+ * It can happen that the device cannot process a request, because it is
+ * busy. The device driver has to retry later.
+ *
+ * Usually, during system resets all memory will get unplugged, so the
+ * device driver can start with a clean state. However, in specific
+ * scenarios (if the device is busy) it can happen that the device still
+ * has memory plugged. The device driver can request to unplug all memory
+ * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
+ * device is busy.
+ */
+
+/* --- virtio-mem: feature bits --- */
+
+/* node_id is an ACPI PXM and is valid */
+#define VIRTIO_MEM_F_ACPI_PXM          0
+
+
+/* --- virtio-mem: guest -> host requests --- */
+
+/* request to plug memory blocks */
+#define VIRTIO_MEM_REQ_PLUG                    0
+/* request to unplug memory blocks */
+#define VIRTIO_MEM_REQ_UNPLUG                  1
+/* request to unplug all blocks and shrink the usable size */
+#define VIRTIO_MEM_REQ_UNPLUG_ALL              2
+/* request information about the plugged state of memory blocks */
+#define VIRTIO_MEM_REQ_STATE                   3
+
+struct virtio_mem_req_plug {
+       __virtio64 addr;
+       __virtio16 nb_blocks;
+       __virtio16 padding[3];
+};
+
+struct virtio_mem_req_unplug {
+       __virtio64 addr;
+       __virtio16 nb_blocks;
+       __virtio16 padding[3];
+};
+
+struct virtio_mem_req_state {
+       __virtio64 addr;
+       __virtio16 nb_blocks;
+       __virtio16 padding[3];
+};
+
+struct virtio_mem_req {
+       __virtio16 type;
+       __virtio16 padding[3];
+
+       union {
+               struct virtio_mem_req_plug plug;
+               struct virtio_mem_req_unplug unplug;
+               struct virtio_mem_req_state state;
+       } u;
+};
+
+
+/* --- virtio-mem: host -> guest response --- */
+
+/*
+ * Request processed successfully, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ACK                    0
+/*
+ * Request denied - e.g. trying to plug more than requested, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ */
+#define VIRTIO_MEM_RESP_NACK                   1
+/*
+ * Request cannot be processed right now, try again later, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ */
+#define VIRTIO_MEM_RESP_BUSY                   2
+/*
+ * Error in request (e.g. addresses/alignment), applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ERROR                  3
+
+
+/* State of memory blocks is "plugged" */
+#define VIRTIO_MEM_STATE_PLUGGED               0
+/* State of memory blocks is "unplugged" */
+#define VIRTIO_MEM_STATE_UNPLUGGED             1
+/* State of memory blocks is "mixed" */
+#define VIRTIO_MEM_STATE_MIXED                 2
+
+struct virtio_mem_resp_state {
+       __virtio16 state;
+};
+
+struct virtio_mem_resp {
+       __virtio16 type;
+       __virtio16 padding[3];
+
+       union {
+               struct virtio_mem_resp_state state;
+       } u;
+};
+
+/* --- virtio-mem: configuration --- */
+
+struct virtio_mem_config {
+       /* Block size and alignment. Cannot change. */
+       __u64 block_size;
+       /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
+       __u16 node_id;
+       __u8 padding[6];
+       /* Start address of the memory region. Cannot change. */
+       __u64 addr;
+       /* Region size (maximum). Cannot change. */
+       __u64 region_size;
+       /*
+        * Currently usable region size. Can grow up to region_size. Can
+        * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
+        * update will be sent).
+        */
+       __u64 usable_region_size;
+       /*
+        * Currently used size. Changes due to plug/unplug requests, but no
+        * config updates will be sent.
+        */
+       __u64 plugged_size;
+       /* Requested size. New plug requests cannot exceed it. Can change. */
+       __u64 requested_size;
+};
+
+#endif /* _LINUX_VIRTIO_MEM_H */
index 559f42e..476d3e5 100644 (file)
  * at the end of the used ring. Guest should ignore the used->flags field. */
 #define VIRTIO_RING_F_EVENT_IDX                29
 
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
 /* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
 struct vring_desc {
        /* Address (guest-physical). */
@@ -112,28 +119,47 @@ struct vring_used_elem {
        __virtio32 len;
 };
 
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+       vring_used_elem_t;
+
 struct vring_used {
        __virtio16 flags;
        __virtio16 idx;
-       struct vring_used_elem ring[];
+       vring_used_elem_t ring[];
 };
 
+/*
+ * The ring element addresses are passed between components with different
+ * alignments assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+       vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+       vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+       vring_used_t;
+
 struct vring {
        unsigned int num;
 
-       struct vring_desc *desc;
+       vring_desc_t *desc;
 
-       struct vring_avail *avail;
+       vring_avail_t *avail;
 
-       struct vring_used *used;
+       vring_used_t *used;
 };
 
-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
+#ifndef VIRTIO_RING_NO_LEGACY
 
 /* The standard layout for the ring is a continuous chunk of memory which looks
  * like this.  We assume num is a power of 2.
@@ -181,6 +207,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
                + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
 }
 
+#endif /* VIRTIO_RING_NO_LEGACY */
+
 /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
 /* Assuming a given event_idx value from the other side, if
  * we have just incremented index from old to new_idx,
index 47ffe32..4b48fbf 100644 (file)
@@ -104,6 +104,7 @@ struct mtd_write_req {
 #define MTD_BIT_WRITEABLE      0x800   /* Single bits can be flipped */
 #define MTD_NO_ERASE           0x1000  /* No erase necessary */
 #define MTD_POWERUP_LOCK       0x2000  /* Always locked after reset */
+#define MTD_SLC_ON_MLC_EMULATION 0x4000        /* Emulate SLC behavior on MLC NANDs */
 
 /* Some common devices / combinations of capabilities */
 #define MTD_CAP_ROM            0
index 58a4b70..49eb7a3 100644 (file)
@@ -1293,7 +1293,6 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        bool "Dead code and data elimination (EXPERIMENTAL)"
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
-       depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
        depends on $(cc-option,-ffunction-sections -fdata-sections)
        depends on $(ld-option,--gc-sections)
        help
index c3ae2ad..9693730 100644 (file)
@@ -74,32 +74,19 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
                             size_t expected_size,
                             size_t actual_size)
 {
-       unsigned char __user *addr;
-       unsigned char __user *end;
-       unsigned char val;
-       int err;
+       unsigned char __user *addr = uaddr + expected_size;
+       int res;
 
        if (unlikely(actual_size > PAGE_SIZE))  /* silly large */
                return -E2BIG;
 
-       if (unlikely(!access_ok(uaddr, actual_size)))
-               return -EFAULT;
-
        if (actual_size <= expected_size)
                return 0;
 
-       addr = uaddr + expected_size;
-       end  = uaddr + actual_size;
-
-       for (; addr < end; addr++) {
-               err = get_user(val, addr);
-               if (err)
-                       return err;
-               if (val)
-                       return -E2BIG;
-       }
-
-       return 0;
+       res = check_zeroed_user(addr, actual_size - expected_size);
+       if (res < 0)
+               return res;
+       return res ? 0 : -E2BIG;
 }
 
 const struct bpf_map_ops bpf_map_offload_ops = {
index 3941a9c..feaad59 100644 (file)
@@ -51,28 +51,4 @@ config GCOV_PROFILE_ALL
        larger and run slower. Also be sure to exclude files from profiling
        which are not linked to the kernel image to prevent linker errors.
 
-choice
-       prompt "Specify GCOV format"
-       depends on GCOV_KERNEL
-       depends on CC_IS_GCC
-       ---help---
-       The gcov format is usually determined by the GCC version, and the
-       default is chosen according to your GCC version. However, there are
-       exceptions where format changes are integrated in lower-version GCCs.
-       In such a case, change this option to adjust the format used in the
-       kernel accordingly.
-
-config GCOV_FORMAT_3_4
-       bool "GCC 3.4 format"
-       depends on GCC_VERSION < 40700
-       ---help---
-       Select this option to use the format defined by GCC 3.4.
-
-config GCOV_FORMAT_4_7
-       bool "GCC 4.7 format"
-       ---help---
-       Select this option to use the format defined by GCC 4.7.
-
-endchoice
-
 endmenu
index d66a74b..16f8ecc 100644 (file)
@@ -2,6 +2,5 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
 obj-y := base.o fs.o
-obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_base.o gcc_3_4.o
-obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_base.o gcc_4_7.o
+obj-$(CONFIG_CC_IS_GCC) += gcc_base.o gcc_4_7.o
 obj-$(CONFIG_CC_IS_CLANG) += clang.o
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
deleted file mode 100644 (file)
index acb8355..0000000
+++ /dev/null
@@ -1,573 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *  This code provides functions to handle gcc's profiling data format
- *  introduced with gcc 3.4. Future versions of gcc may change the gcov
- *  format (as happened before), so all format-specific information needs
- *  to be kept modular and easily exchangeable.
- *
- *  This file is based on gcc-internal definitions. Functions and data
- *  structures are defined to be compatible with gcc counterparts.
- *  For a better understanding, refer to gcc source: gcc/gcov-io.h.
- *
- *    Copyright IBM Corp. 2009
- *    Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
- *
- *    Uses gcc-internal data definitions.
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include "gcov.h"
-
-#define GCOV_COUNTERS          5
-
-static struct gcov_info *gcov_info_head;
-
-/**
- * struct gcov_fn_info - profiling meta data per function
- * @ident: object file-unique function identifier
- * @checksum: function checksum
- * @n_ctrs: number of values per counter type belonging to this function
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time.
- */
-struct gcov_fn_info {
-       unsigned int ident;
-       unsigned int checksum;
-       unsigned int n_ctrs[];
-};
-
-/**
- * struct gcov_ctr_info - profiling data per counter type
- * @num: number of counter values for this type
- * @values: array of counter values for this type
- * @merge: merge function for counter values of this type (unused)
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the values array.
- */
-struct gcov_ctr_info {
-       unsigned int    num;
-       gcov_type       *values;
-       void            (*merge)(gcov_type *, unsigned int);
-};
-
-/**
- * struct gcov_info - profiling data per object file
- * @version: gcov version magic indicating the gcc version used for compilation
- * @next: list head for a singly-linked list
- * @stamp: time stamp
- * @filename: name of the associated gcov data file
- * @n_functions: number of instrumented functions
- * @functions: function data
- * @ctr_mask: mask specifying which counter types are active
- * @counts: counter data per counter type
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the next pointer.
- */
-struct gcov_info {
-       unsigned int                    version;
-       struct gcov_info                *next;
-       unsigned int                    stamp;
-       const char                      *filename;
-       unsigned int                    n_functions;
-       const struct gcov_fn_info       *functions;
-       unsigned int                    ctr_mask;
-       struct gcov_ctr_info            counts[];
-};
-
-/**
- * gcov_info_filename - return info filename
- * @info: profiling data set
- */
-const char *gcov_info_filename(struct gcov_info *info)
-{
-       return info->filename;
-}
-
-/**
- * gcov_info_version - return info version
- * @info: profiling data set
- */
-unsigned int gcov_info_version(struct gcov_info *info)
-{
-       return info->version;
-}
-
-/**
- * gcov_info_next - return next profiling data set
- * @info: profiling data set
- *
- * Returns next gcov_info following @info or first gcov_info in the chain if
- * @info is %NULL.
- */
-struct gcov_info *gcov_info_next(struct gcov_info *info)
-{
-       if (!info)
-               return gcov_info_head;
-
-       return info->next;
-}
-
-/**
- * gcov_info_link - link/add profiling data set to the list
- * @info: profiling data set
- */
-void gcov_info_link(struct gcov_info *info)
-{
-       info->next = gcov_info_head;
-       gcov_info_head = info;
-}
-
-/**
- * gcov_info_unlink - unlink/remove profiling data set from the list
- * @prev: previous profiling data set
- * @info: profiling data set
- */
-void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
-{
-       if (prev)
-               prev->next = info->next;
-       else
-               gcov_info_head = info->next;
-}
-
-/**
- * gcov_info_within_module - check if a profiling data set belongs to a module
- * @info: profiling data set
- * @mod: module
- *
- * Returns true if profiling data belongs module, false otherwise.
- */
-bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
-{
-       return within_module((unsigned long)info, mod);
-}
-
-/* Symbolic links to be created for each profiling data file. */
-const struct gcov_link gcov_link[] = {
-       { OBJ_TREE, "gcno" },   /* Link to .gcno file in $(objtree). */
-       { 0, NULL},
-};
-
-/*
- * Determine whether a counter is active. Based on gcc magic. Doesn't change
- * at run-time.
- */
-static int counter_active(struct gcov_info *info, unsigned int type)
-{
-       return (1 << type) & info->ctr_mask;
-}
-
-/* Determine number of active counters. Based on gcc magic. */
-static unsigned int num_counter_active(struct gcov_info *info)
-{
-       unsigned int i;
-       unsigned int result = 0;
-
-       for (i = 0; i < GCOV_COUNTERS; i++) {
-               if (counter_active(info, i))
-                       result++;
-       }
-       return result;
-}
-
-/**
- * gcov_info_reset - reset profiling data to zero
- * @info: profiling data set
- */
-void gcov_info_reset(struct gcov_info *info)
-{
-       unsigned int active = num_counter_active(info);
-       unsigned int i;
-
-       for (i = 0; i < active; i++) {
-               memset(info->counts[i].values, 0,
-                      info->counts[i].num * sizeof(gcov_type));
-       }
-}
-
-/**
- * gcov_info_is_compatible - check if profiling data can be added
- * @info1: first profiling data set
- * @info2: second profiling data set
- *
- * Returns non-zero if profiling data can be added, zero otherwise.
- */
-int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
-{
-       return (info1->stamp == info2->stamp);
-}
-
-/**
- * gcov_info_add - add up profiling data
- * @dest: profiling data set to which data is added
- * @source: profiling data set which is added
- *
- * Adds profiling counts of @source to @dest.
- */
-void gcov_info_add(struct gcov_info *dest, struct gcov_info *source)
-{
-       unsigned int i;
-       unsigned int j;
-
-       for (i = 0; i < num_counter_active(dest); i++) {
-               for (j = 0; j < dest->counts[i].num; j++) {
-                       dest->counts[i].values[j] +=
-                               source->counts[i].values[j];
-               }
-       }
-}
-
-/* Get size of function info entry. Based on gcc magic. */
-static size_t get_fn_size(struct gcov_info *info)
-{
-       size_t size;
-
-       size = sizeof(struct gcov_fn_info) + num_counter_active(info) *
-              sizeof(unsigned int);
-       if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int))
-               size = ALIGN(size, __alignof__(struct gcov_fn_info));
-       return size;
-}
-
-/* Get address of function info entry. Based on gcc magic. */
-static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn)
-{
-       return (struct gcov_fn_info *)
-               ((char *) info->functions + fn * get_fn_size(info));
-}
-
-/**
- * gcov_info_dup - duplicate profiling data set
- * @info: profiling data set to duplicate
- *
- * Return newly allocated duplicate on success, %NULL on error.
- */
-struct gcov_info *gcov_info_dup(struct gcov_info *info)
-{
-       struct gcov_info *dup;
-       unsigned int i;
-       unsigned int active;
-
-       /* Duplicate gcov_info. */
-       active = num_counter_active(info);
-       dup = kzalloc(struct_size(dup, counts, active), GFP_KERNEL);
-       if (!dup)
-               return NULL;
-       dup->version            = info->version;
-       dup->stamp              = info->stamp;
-       dup->n_functions        = info->n_functions;
-       dup->ctr_mask           = info->ctr_mask;
-       /* Duplicate filename. */
-       dup->filename           = kstrdup(info->filename, GFP_KERNEL);
-       if (!dup->filename)
-               goto err_free;
-       /* Duplicate table of functions. */
-       dup->functions = kmemdup(info->functions, info->n_functions *
-                                get_fn_size(info), GFP_KERNEL);
-       if (!dup->functions)
-               goto err_free;
-       /* Duplicate counter arrays. */
-       for (i = 0; i < active ; i++) {
-               struct gcov_ctr_info *ctr = &info->counts[i];
-               size_t size = ctr->num * sizeof(gcov_type);
-
-               dup->counts[i].num = ctr->num;
-               dup->counts[i].merge = ctr->merge;
-               dup->counts[i].values = vmalloc(size);
-               if (!dup->counts[i].values)
-                       goto err_free;
-               memcpy(dup->counts[i].values, ctr->values, size);
-       }
-       return dup;
-
-err_free:
-       gcov_info_free(dup);
-       return NULL;
-}
-
-/**
- * gcov_info_free - release memory for profiling data set duplicate
- * @info: profiling data set duplicate to free
- */
-void gcov_info_free(struct gcov_info *info)
-{
-       unsigned int active = num_counter_active(info);
-       unsigned int i;
-
-       for (i = 0; i < active ; i++)
-               vfree(info->counts[i].values);
-       kfree(info->functions);
-       kfree(info->filename);
-       kfree(info);
-}
-
-/**
- * struct type_info - iterator helper array
- * @ctr_type: counter type
- * @offset: index of the first value of the current function for this type
- *
- * This array is needed to convert the in-memory data format into the in-file
- * data format:
- *
- * In-memory:
- *   for each counter type
- *     for each function
- *       values
- *
- * In-file:
- *   for each function
- *     for each counter type
- *       values
- *
- * See gcc source gcc/gcov-io.h for more information on data organization.
- */
-struct type_info {
-       int ctr_type;
-       unsigned int offset;
-};
-
-/**
- * struct gcov_iterator - specifies current file position in logical records
- * @info: associated profiling data
- * @record: record type
- * @function: function number
- * @type: counter type
- * @count: index into values array
- * @num_types: number of counter types
- * @type_info: helper array to get values-array offset for current function
- */
-struct gcov_iterator {
-       struct gcov_info *info;
-
-       int record;
-       unsigned int function;
-       unsigned int type;
-       unsigned int count;
-
-       int num_types;
-       struct type_info type_info[];
-};
-
-static struct gcov_fn_info *get_func(struct gcov_iterator *iter)
-{
-       return get_fn_info(iter->info, iter->function);
-}
-
-static struct type_info *get_type(struct gcov_iterator *iter)
-{
-       return &iter->type_info[iter->type];
-}
-
-/**
- * gcov_iter_new - allocate and initialize profiling data iterator
- * @info: profiling data set to be iterated
- *
- * Return file iterator on success, %NULL otherwise.
- */
-struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
-{
-       struct gcov_iterator *iter;
-
-       iter = kzalloc(struct_size(iter, type_info, num_counter_active(info)),
-                      GFP_KERNEL);
-       if (iter)
-               iter->info = info;
-
-       return iter;
-}
-
-/**
- * gcov_iter_free - release memory for iterator
- * @iter: file iterator to free
- */
-void gcov_iter_free(struct gcov_iterator *iter)
-{
-       kfree(iter);
-}
-
-/**
- * gcov_iter_get_info - return profiling data set for given file iterator
- * @iter: file iterator
- */
-struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
-{
-       return iter->info;
-}
-
-/**
- * gcov_iter_start - reset file iterator to starting position
- * @iter: file iterator
- */
-void gcov_iter_start(struct gcov_iterator *iter)
-{
-       int i;
-
-       iter->record = 0;
-       iter->function = 0;
-       iter->type = 0;
-       iter->count = 0;
-       iter->num_types = 0;
-       for (i = 0; i < GCOV_COUNTERS; i++) {
-               if (counter_active(iter->info, i)) {
-                       iter->type_info[iter->num_types].ctr_type = i;
-                       iter->type_info[iter->num_types++].offset = 0;
-               }
-       }
-}
-
-/* Mapping of logical record number to actual file content. */
-#define RECORD_FILE_MAGIC      0
-#define RECORD_GCOV_VERSION    1
-#define RECORD_TIME_STAMP      2
-#define RECORD_FUNCTION_TAG    3
-#define RECORD_FUNCTON_TAG_LEN 4
-#define RECORD_FUNCTION_IDENT  5
-#define RECORD_FUNCTION_CHECK  6
-#define RECORD_COUNT_TAG       7
-#define RECORD_COUNT_LEN       8
-#define RECORD_COUNT           9
-
-/**
- * gcov_iter_next - advance file iterator to next logical record
- * @iter: file iterator
- *
- * Return zero if new position is valid, non-zero if iterator has reached end.
- */
-int gcov_iter_next(struct gcov_iterator *iter)
-{
-       switch (iter->record) {
-       case RECORD_FILE_MAGIC:
-       case RECORD_GCOV_VERSION:
-       case RECORD_FUNCTION_TAG:
-       case RECORD_FUNCTON_TAG_LEN:
-       case RECORD_FUNCTION_IDENT:
-       case RECORD_COUNT_TAG:
-               /* Advance to next record */
-               iter->record++;
-               break;
-       case RECORD_COUNT:
-               /* Advance to next count */
-               iter->count++;
-               /* fall through */
-       case RECORD_COUNT_LEN:
-               if (iter->count < get_func(iter)->n_ctrs[iter->type]) {
-                       iter->record = 9;
-                       break;
-               }
-               /* Advance to next counter type */
-               get_type(iter)->offset += iter->count;
-               iter->count = 0;
-               iter->type++;
-               /* fall through */
-       case RECORD_FUNCTION_CHECK:
-               if (iter->type < iter->num_types) {
-                       iter->record = 7;
-                       break;
-               }
-               /* Advance to next function */
-               iter->type = 0;
-               iter->function++;
-               /* fall through */
-       case RECORD_TIME_STAMP:
-               if (iter->function < iter->info->n_functions)
-                       iter->record = 3;
-               else
-                       iter->record = -1;
-               break;
-       }
-       /* Check for EOF. */
-       if (iter->record == -1)
-               return -EINVAL;
-       else
-               return 0;
-}
-
-/**
- * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file
- * @seq: seq_file handle
- * @v: value to be stored
- *
- * Number format defined by gcc: numbers are recorded in the 32 bit
- * unsigned binary form of the endianness of the machine generating the
- * file.
- */
-static int seq_write_gcov_u32(struct seq_file *seq, u32 v)
-{
-       return seq_write(seq, &v, sizeof(v));
-}
-
-/**
- * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file
- * @seq: seq_file handle
- * @v: value to be stored
- *
- * Number format defined by gcc: numbers are recorded in the 32 bit
- * unsigned binary form of the endianness of the machine generating the
- * file. 64 bit numbers are stored as two 32 bit numbers, the low part
- * first.
- */
-static int seq_write_gcov_u64(struct seq_file *seq, u64 v)
-{
-       u32 data[2];
-
-       data[0] = (v & 0xffffffffUL);
-       data[1] = (v >> 32);
-       return seq_write(seq, data, sizeof(data));
-}
-
-/**
- * gcov_iter_write - write data for current pos to seq_file
- * @iter: file iterator
- * @seq: seq_file handle
- *
- * Return zero on success, non-zero otherwise.
- */
-int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
-{
-       int rc = -EINVAL;
-
-       switch (iter->record) {
-       case RECORD_FILE_MAGIC:
-               rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC);
-               break;
-       case RECORD_GCOV_VERSION:
-               rc = seq_write_gcov_u32(seq, iter->info->version);
-               break;
-       case RECORD_TIME_STAMP:
-               rc = seq_write_gcov_u32(seq, iter->info->stamp);
-               break;
-       case RECORD_FUNCTION_TAG:
-               rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION);
-               break;
-       case RECORD_FUNCTON_TAG_LEN:
-               rc = seq_write_gcov_u32(seq, 2);
-               break;
-       case RECORD_FUNCTION_IDENT:
-               rc = seq_write_gcov_u32(seq, get_func(iter)->ident);
-               break;
-       case RECORD_FUNCTION_CHECK:
-               rc = seq_write_gcov_u32(seq, get_func(iter)->checksum);
-               break;
-       case RECORD_COUNT_TAG:
-               rc = seq_write_gcov_u32(seq,
-                       GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type));
-               break;
-       case RECORD_COUNT_LEN:
-               rc = seq_write_gcov_u32(seq,
-                               get_func(iter)->n_ctrs[iter->type] * 2);
-               break;
-       case RECORD_COUNT:
-               rc = seq_write_gcov_u64(seq,
-                       iter->info->counts[iter->type].
-                               values[iter->count + get_type(iter)->offset]);
-               break;
-       }
-       return rc;
-}
index 86357cd..8e3d2d7 100644 (file)
@@ -51,6 +51,7 @@ struct kthread_create_info
 struct kthread {
        unsigned long flags;
        unsigned int cpu;
+       int (*threadfn)(void *);
        void *data;
        mm_segment_t oldfs;
        struct completion parked;
@@ -158,6 +159,20 @@ bool kthread_freezable_should_stop(bool *was_frozen)
 }
 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
 
+/**
+ * kthread_func - return the function specified on kthread creation
+ * @task: kthread task in question
+ *
+ * Returns NULL if the task is not a kthread.
+ */
+void *kthread_func(struct task_struct *task)
+{
+       if (task->flags & PF_KTHREAD)
+               return to_kthread(task)->threadfn;
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(kthread_func);
+
 /**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
@@ -170,6 +185,7 @@ void *kthread_data(struct task_struct *task)
 {
        return to_kthread(task)->data;
 }
+EXPORT_SYMBOL_GPL(kthread_data);
 
 /**
  * kthread_probe_data - speculative version of kthread_data()
@@ -250,6 +266,7 @@ static int kthread(void *_create)
                do_exit(-ENOMEM);
        }
 
+       self->threadfn = threadfn;
        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
index ca0fcb5..01e2858 100644 (file)
@@ -1590,7 +1590,7 @@ int swsusp_unmark(void)
 }
 #endif
 
-static int swsusp_header_init(void)
+static int __init swsusp_header_init(void)
 {
        swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
        if (!swsusp_header)
index 222a7a9..5d4d9bb 100644 (file)
@@ -74,7 +74,7 @@ static void scs_check_usage(struct task_struct *tsk)
        for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
                if (!READ_ONCE_NOCHECK(*p))
                        break;
-               used++;
+               used += sizeof(*p);
        }
 
        while (used > curr) {
index c557f42..98bba47 100644 (file)
@@ -515,9 +515,8 @@ static const struct file_operations stack_trace_filter_fops = {
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 int
-stack_trace_sysctl(struct ctl_table *table, int write,
-                  void __user *buffer, size_t *lenp,
-                  loff_t *ppos)
+stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+                  size_t *lenp, loff_t *ppos)
 {
        int was_enabled;
        int ret;
index 052d3fb..7c031ee 100644 (file)
--- a/lib/bch.c
+++ b/lib/bch.c
  * This library provides runtime configurable encoding/decoding of binary
  * Bose-Chaudhuri-Hocquenghem (BCH) codes.
  *
- * Call init_bch to get a pointer to a newly allocated bch_control structure for
+ * Call bch_init to get a pointer to a newly allocated bch_control structure for
  * the given m (Galois field order), t (error correction capability) and
  * (optional) primitive polynomial parameters.
  *
- * Call encode_bch to compute and store ecc parity bytes to a given buffer.
- * Call decode_bch to detect and locate errors in received data.
+ * Call bch_encode to compute and store ecc parity bytes to a given buffer.
+ * Call bch_decode to detect and locate errors in received data.
  *
  * On systems supporting hw BCH features, intermediate results may be provided
- * to decode_bch in order to skip certain steps. See decode_bch() documentation
+ * to bch_decode in order to skip certain steps. See bch_decode() documentation
  * for details.
  *
  * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of
@@ -114,10 +114,53 @@ struct gf_poly_deg1 {
        unsigned int   c[2];
 };
 
+static u8 swap_bits_table[] = {
+       0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+       0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+       0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+       0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+       0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+       0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+       0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+       0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+       0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+       0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+       0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+       0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+       0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+       0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+       0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+       0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+       0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+       0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+       0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+       0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+       0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+       0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+       0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+       0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+       0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+       0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+       0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+       0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+       0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+       0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+       0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+       0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+
+static u8 swap_bits(struct bch_control *bch, u8 in)
+{
+       if (!bch->swap_bits)
+               return in;
+
+       return swap_bits_table[in];
+}
+
 /*
- * same as encode_bch(), but process input data one byte at a time
+ * same as bch_encode(), but process input data one byte at a time
  */
-static void encode_bch_unaligned(struct bch_control *bch,
+static void bch_encode_unaligned(struct bch_control *bch,
                                 const unsigned char *data, unsigned int len,
                                 uint32_t *ecc)
 {
@@ -126,7 +169,9 @@ static void encode_bch_unaligned(struct bch_control *bch,
        const int l = BCH_ECC_WORDS(bch)-1;
 
        while (len--) {
-               p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff);
+               u8 tmp = swap_bits(bch, *data++);
+
+               p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff);
 
                for (i = 0; i < l; i++)
                        ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
@@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst,
        unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
 
        for (i = 0; i < nwords; i++, src += 4)
-               dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3];
+               dst[i] = ((u32)swap_bits(bch, src[0]) << 24) |
+                       ((u32)swap_bits(bch, src[1]) << 16) |
+                       ((u32)swap_bits(bch, src[2]) << 8) |
+                       swap_bits(bch, src[3]);
 
        memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords);
-       dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3];
+       dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) |
+               ((u32)swap_bits(bch, pad[1]) << 16) |
+               ((u32)swap_bits(bch, pad[2]) << 8) |
+               swap_bits(bch, pad[3]);
 }
 
 /*
@@ -161,20 +212,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
        unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
 
        for (i = 0; i < nwords; i++) {
-               *dst++ = (src[i] >> 24);
-               *dst++ = (src[i] >> 16) & 0xff;
-               *dst++ = (src[i] >>  8) & 0xff;
-               *dst++ = (src[i] >>  0) & 0xff;
+               *dst++ = swap_bits(bch, src[i] >> 24);
+               *dst++ = swap_bits(bch, src[i] >> 16);
+               *dst++ = swap_bits(bch, src[i] >> 8);
+               *dst++ = swap_bits(bch, src[i]);
        }
-       pad[0] = (src[nwords] >> 24);
-       pad[1] = (src[nwords] >> 16) & 0xff;
-       pad[2] = (src[nwords] >>  8) & 0xff;
-       pad[3] = (src[nwords] >>  0) & 0xff;
+       pad[0] = swap_bits(bch, src[nwords] >> 24);
+       pad[1] = swap_bits(bch, src[nwords] >> 16);
+       pad[2] = swap_bits(bch, src[nwords] >> 8);
+       pad[3] = swap_bits(bch, src[nwords]);
        memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords);
 }
 
 /**
- * encode_bch - calculate BCH ecc parity of data
+ * bch_encode - calculate BCH ecc parity of data
  * @bch:   BCH control structure
  * @data:  data to encode
  * @len:   data length in bytes
@@ -187,7 +238,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
  * The exact number of computed ecc parity bits is given by member @ecc_bits of
  * @bch; it may be less than m*t for large values of t.
  */
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
                unsigned int len, uint8_t *ecc)
 {
        const unsigned int l = BCH_ECC_WORDS(bch)-1;
@@ -215,7 +266,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
        m = ((unsigned long)data) & 3;
        if (m) {
                mlen = (len < (4-m)) ? len : 4-m;
-               encode_bch_unaligned(bch, data, mlen, bch->ecc_buf);
+               bch_encode_unaligned(bch, data, mlen, bch->ecc_buf);
                data += mlen;
                len  -= mlen;
        }
@@ -240,7 +291,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
         */
        while (mlen--) {
                /* input data is read in big-endian format */
-               w = r[0]^cpu_to_be32(*pdata++);
+               w = cpu_to_be32(*pdata++);
+               if (bch->swap_bits)
+                       w = (u32)swap_bits(bch, w) |
+                           ((u32)swap_bits(bch, w >> 8) << 8) |
+                           ((u32)swap_bits(bch, w >> 16) << 16) |
+                           ((u32)swap_bits(bch, w >> 24) << 24);
+               w ^= r[0];
                p0 = tab0 + (l+1)*((w >>  0) & 0xff);
                p1 = tab1 + (l+1)*((w >>  8) & 0xff);
                p2 = tab2 + (l+1)*((w >> 16) & 0xff);
@@ -255,13 +312,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
 
        /* process last unaligned bytes */
        if (len)
-               encode_bch_unaligned(bch, data, len, bch->ecc_buf);
+               bch_encode_unaligned(bch, data, len, bch->ecc_buf);
 
        /* store ecc parity bytes into original parity buffer */
        if (ecc)
                store_ecc8(bch, ecc, bch->ecc_buf);
 }
-EXPORT_SYMBOL_GPL(encode_bch);
+EXPORT_SYMBOL_GPL(bch_encode);
 
 static inline int modulo(struct bch_control *bch, unsigned int v)
 {
@@ -952,7 +1009,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
 #endif /* USE_CHIEN_SEARCH */
 
 /**
- * decode_bch - decode received codeword and find bit error locations
+ * bch_decode - decode received codeword and find bit error locations
  * @bch:      BCH control structure
  * @data:     received data, ignored if @calc_ecc is provided
  * @len:      data length in bytes, must always be provided
@@ -966,22 +1023,22 @@ static int chien_search(struct bch_control *bch, unsigned int len,
  *  invalid parameters were provided
  *
  * Depending on the available hw BCH support and the need to compute @calc_ecc
- * separately (using encode_bch()), this function should be called with one of
+ * separately (using bch_encode()), this function should be called with one of
  * the following parameter configurations -
  *
  * by providing @data and @recv_ecc only:
- *   decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
+ *   bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
  *
  * by providing @recv_ecc and @calc_ecc:
- *   decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
+ *   bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
  *
  * by providing ecc = recv_ecc XOR calc_ecc:
- *   decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
+ *   bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
  *
  * by providing syndrome results @syn:
- *   decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
+ *   bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
  *
- * Once decode_bch() has successfully returned with a positive value, error
+ * Once bch_decode() has successfully returned with a positive value, error
  * locations returned in array @errloc should be interpreted as follows -
  *
  * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for
@@ -993,7 +1050,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
  * Note that this function does not perform any data correction by itself, it
  * merely indicates error locations.
  */
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
               const uint8_t *recv_ecc, const uint8_t *calc_ecc,
               const unsigned int *syn, unsigned int *errloc)
 {
@@ -1012,7 +1069,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
                        /* compute received data ecc into an internal buffer */
                        if (!data || !recv_ecc)
                                return -EINVAL;
-                       encode_bch(bch, data, len, NULL);
+                       bch_encode(bch, data, len, NULL);
                } else {
                        /* load provided calculated ecc */
                        load_ecc8(bch, bch->ecc_buf, calc_ecc);
@@ -1048,12 +1105,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
                                break;
                        }
                        errloc[i] = nbits-1-errloc[i];
-                       errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7));
+                       if (!bch->swap_bits)
+                               errloc[i] = (errloc[i] & ~7) |
+                                           (7-(errloc[i] & 7));
                }
        }
        return (err >= 0) ? err : -EBADMSG;
 }
-EXPORT_SYMBOL_GPL(decode_bch);
+EXPORT_SYMBOL_GPL(bch_decode);
 
 /*
  * generate Galois field lookup tables
@@ -1236,27 +1295,29 @@ finish:
 }
 
 /**
- * init_bch - initialize a BCH encoder/decoder
+ * bch_init - initialize a BCH encoder/decoder
  * @m:          Galois field order, should be in the range 5-15
  * @t:          maximum error correction capability, in bits
  * @prim_poly:  user-provided primitive polynomial (or 0 to use default)
+ * @swap_bits:  swap bits within data and syndrome bytes
  *
  * Returns:
  *  a newly allocated BCH control structure if successful, NULL otherwise
  *
  * This initialization can take some time, as lookup tables are built for fast
  * encoding/decoding; make sure not to call this function from a time critical
- * path. Usually, init_bch() should be called on module/driver init and
- * free_bch() should be called to release memory on exit.
+ * path. Usually, bch_init() should be called on module/driver init and
+ * bch_free() should be called to release memory on exit.
  *
  * You may provide your own primitive polynomial of degree @m in argument
- * @prim_poly, or let init_bch() use its default polynomial.
+ * @prim_poly, or let bch_init() use its default polynomial.
  *
- * Once init_bch() has successfully returned a pointer to a newly allocated
+ * Once bch_init() has successfully returned a pointer to a newly allocated
  * BCH control structure, ecc length in bytes is given by member @ecc_bytes of
  * the structure.
  */
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+                            bool swap_bits)
 {
        int err = 0;
        unsigned int i, words;
@@ -1321,6 +1382,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
        bch->syn       = bch_alloc(2*t*sizeof(*bch->syn), &err);
        bch->cache     = bch_alloc(2*t*sizeof(*bch->cache), &err);
        bch->elp       = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
+       bch->swap_bits = swap_bits;
 
        for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
                bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
@@ -1347,16 +1409,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
        return bch;
 
 fail:
-       free_bch(bch);
+       bch_free(bch);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(init_bch);
+EXPORT_SYMBOL_GPL(bch_init);
 
 /**
- *  free_bch - free the BCH control structure
+ *  bch_free - free the BCH control structure
  *  @bch:    BCH control structure to release
  */
-void free_bch(struct bch_control *bch)
+void bch_free(struct bch_control *bch)
 {
        unsigned int i;
 
@@ -1377,7 +1439,7 @@ void free_bch(struct bch_control *bch)
                kfree(bch);
        }
 }
-EXPORT_SYMBOL_GPL(free_bch);
+EXPORT_SYMBOL_GPL(bch_free);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
index 8186ca8..ce12621 100644 (file)
@@ -106,7 +106,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
                if (fail_nth) {
-                       if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+                       fail_nth--;
+                       WRITE_ONCE(current->fail_nth, fail_nth);
+                       if (!fail_nth)
                                goto fail;
 
                        return false;
index c4d5c45..9b34e03 100644 (file)
@@ -1201,11 +1201,17 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
 
 /*
  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
- * non-lru movable pages and hugepages). We scan pfn because it's much
- * easier than scanning over linked list. This function returns the pfn
- * of the first found movable page if it's found, otherwise 0.
+ * non-lru movable pages and hugepages). Will skip over most unmovable
+ * pages (esp., pages that can be skipped when offlining), but bail out on
+ * definitely unmovable pages.
+ *
+ * Returns:
+ *     0 in case a movable page is found and movable_pfn was updated.
+ *     -ENOENT in case no movable page was found.
+ *     -EBUSY in case a definitely unmovable page was found.
  */
-static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
+static int scan_movable_pages(unsigned long start, unsigned long end,
+                             unsigned long *movable_pfn)
 {
        unsigned long pfn;
 
@@ -1217,18 +1223,30 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        continue;
                page = pfn_to_page(pfn);
                if (PageLRU(page))
-                       return pfn;
+                       goto found;
                if (__PageMovable(page))
-                       return pfn;
+                       goto found;
+
+               /*
+                * PageOffline() pages that are not marked __PageMovable() and
+                * have a reference count > 0 (after MEM_GOING_OFFLINE) are
+                * definitely unmovable. If their reference count would be 0,
+                * they could at least be skipped when offlining memory.
+                */
+               if (PageOffline(page) && page_count(page))
+                       return -EBUSY;
 
                if (!PageHuge(page))
                        continue;
                head = compound_head(page);
                if (page_huge_active(head))
-                       return pfn;
+                       goto found;
                skip = compound_nr(head) - (page - head);
                pfn += skip - 1;
        }
+       return -ENOENT;
+found:
+       *movable_pfn = pfn;
        return 0;
 }
 
@@ -1491,7 +1509,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
        }
 
        do {
-               for (pfn = start_pfn; pfn;) {
+               pfn = start_pfn;
+               do {
                        if (signal_pending(current)) {
                                ret = -EINTR;
                                reason = "signal backoff";
@@ -1501,14 +1520,19 @@ static int __ref __offline_pages(unsigned long start_pfn,
                        cond_resched();
                        lru_add_drain_all();
 
-                       pfn = scan_movable_pages(pfn, end_pfn);
-                       if (pfn) {
+                       ret = scan_movable_pages(pfn, end_pfn, &pfn);
+                       if (!ret) {
                                /*
                                 * TODO: fatal migration failures should bail
                                 * out
                                 */
                                do_migrate_range(pfn, end_pfn);
                        }
+               } while (!ret);
+
+               if (ret != -ENOENT) {
+                       reason = "unmovable page";
+                       goto failed_removal_isolated;
                }
 
                /*
@@ -1774,4 +1798,41 @@ int remove_memory(int nid, u64 start, u64 size)
        return rc;
 }
 EXPORT_SYMBOL_GPL(remove_memory);
+
+/*
+ * Try to offline and remove a memory block. Might take a long time to
+ * finish in case memory is still in use. Primarily useful for memory devices
+ * that logically unplugged all memory (so it's no longer in use) and want to
+ * offline + remove the memory block.
+ */
+int offline_and_remove_memory(int nid, u64 start, u64 size)
+{
+       struct memory_block *mem;
+       int rc = -EINVAL;
+
+       if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
+           size != memory_block_size_bytes())
+               return rc;
+
+       lock_device_hotplug();
+       mem = find_memory_block(__pfn_to_section(PFN_DOWN(start)));
+       if (mem)
+               rc = device_offline(&mem->dev);
+       /* Ignore if the device is already offline. */
+       if (rc > 0)
+               rc = 0;
+
+       /*
+        * In case we succeeded to offline the memory block, remove it.
+        * This cannot fail as it cannot get onlined in the meantime.
+        */
+       if (!rc) {
+               rc = try_remove_memory(nid, start, size);
+               WARN_ON_ONCE(rc);
+       }
+       unlock_device_hotplug();
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(offline_and_remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
index 7277512..48eb0f1 100644 (file)
@@ -8285,6 +8285,19 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        continue;
 
+               /*
+                * We treat all PageOffline() pages as movable when offlining
+                * to give drivers a chance to decrement their reference count
+                * in MEM_GOING_OFFLINE in order to indicate that these pages
+                * can be offlined as there are no direct references anymore.
+                * For actually unmovable PageOffline() where the driver does
+                * not support this, we will fail later when trying to actually
+                * move these pages that still have a reference count > 0.
+                * (false negatives in this function only)
+                */
+               if ((flags & MEMORY_OFFLINE) && PageOffline(page))
+                       continue;
+
                if (__PageMovable(page) || PageLRU(page))
                        continue;
 
@@ -8516,6 +8529,7 @@ done:
                                pfn_max_align_up(end), migratetype);
        return ret;
 }
+EXPORT_SYMBOL(alloc_contig_range);
 
 static int __alloc_contig_pages(unsigned long start_pfn,
                                unsigned long nr_pages, gfp_t gfp_mask)
@@ -8631,6 +8645,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
        }
        WARN(count != 0, "%d pages are still in use!\n", count);
 }
+EXPORT_SYMBOL(free_contig_range);
 
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
@@ -8703,6 +8718,17 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                        offlined_pages++;
                        continue;
                }
+               /*
+                * At this point all remaining PageOffline() pages have a
+                * reference count of 0 and can simply be skipped.
+                */
+               if (PageOffline(page)) {
+                       BUG_ON(page_count(page));
+                       BUG_ON(PageBuddy(page));
+                       pfn++;
+                       offlined_pages++;
+                       continue;
+               }
 
                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
index 2c11a38..f6d07c5 100644 (file)
@@ -151,6 +151,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  *                     a bit mask)
  *                     MEMORY_OFFLINE - isolate to offline (!allocate) memory
  *                                      e.g., skip over PageHWPoison() pages
+ *                                      and PageOffline() pages.
  *                     REPORT_FAILURE - report details about the failure to
  *                     isolate the range
  *
@@ -259,6 +260,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* A HWPoisoned page cannot be also PageBuddy */
                        pfn++;
+               else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
+                        !page_count(page))
+                       /*
+                        * The responsible driver agreed to skip PageOffline()
+                        * pages when offlining memory by dropping its
+                        * reference in MEM_GOING_OFFLINE.
+                        */
+                       pfn++;
                else
                        break;
        }
index b109cc8..f93f8ac 100644 (file)
@@ -128,7 +128,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
                return -ENOMEM;
 
        if (write) {
-               ret = cpumask_parse_user(buffer, *lenp, mask);
+               ret = cpumask_parse(buffer, mask);
                if (ret)
                        goto done;
 
index 78f046e..3ac7c8c 100644 (file)
@@ -376,7 +376,7 @@ static bool nf_remove_net_hook(struct nf_hook_entries *old,
                if (orig_ops[i] != unreg)
                        continue;
                WRITE_ONCE(old->hooks[i].hook, accept_all);
-               WRITE_ONCE(orig_ops[i], &dummy_ops);
+               WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
                return true;
        }
 
index 8b4d72b..010dcb8 100644 (file)
@@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
 
        rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
                        IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
-       if (unlikely((size_t)rc > sizeof(scopebuf)))
+       if (unlikely((size_t)rc >= sizeof(scopebuf)))
                return 0;
 
        len += rc;
-       if (unlikely(len > buflen))
+       if (unlikely(len >= buflen))
                return 0;
 
        strcat(buf, scopebuf);
index 5748ad0..a9f0d17 100644 (file)
@@ -81,7 +81,7 @@ static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)
        unsigned int nbits;
 
        nbits = *(unsigned int *)kp->arg;
-       return sprintf(buffer, "%u", 1U << nbits);
+       return sprintf(buffer, "%u\n", 1U << nbits);
 }
 
 #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
index ac5cac0..4ecc2a9 100644 (file)
@@ -254,7 +254,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
        if (IS_ERR(p))
                goto err;
 done:
-       trace_rpcgss_context(ctx->gc_expiry, now, timeout,
+       trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
                             ctx->gc_acceptor.len, ctx->gc_acceptor.data);
 err:
        return p;
@@ -697,10 +697,12 @@ retry:
                }
                schedule();
        }
-       if (gss_msg->ctx)
+       if (gss_msg->ctx) {
+               trace_rpcgss_ctx_init(gss_cred);
                gss_cred_set_ctx(cred, gss_msg->ctx);
-       else
+       } else {
                err = gss_msg->msg.errno;
+       }
        spin_unlock(&pipe->lock);
 out_intr:
        finish_wait(&gss_msg->waitqueue, &wait);
@@ -1054,11 +1056,11 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
        auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
        auth->au_verfsize = GSS_VERF_SLACK >> 2;
        auth->au_ralign = GSS_VERF_SLACK >> 2;
-       auth->au_flags = 0;
+       __set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
        auth->au_ops = &authgss_ops;
        auth->au_flavor = flavor;
        if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
-               auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
+               __set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);
        refcount_set(&auth->au_count, 1);
        kref_init(&gss_auth->kref);
 
@@ -1284,8 +1286,9 @@ gss_send_destroy_context(struct rpc_cred *cred)
        if (new) {
                ctx->gc_proc = RPC_GSS_PROC_DESTROY;
 
+               trace_rpcgss_ctx_destroy(gss_cred);
                task = rpc_call_null(gss_auth->client, &new->gc_base,
-                               RPC_TASK_ASYNC|RPC_TASK_SOFT);
+                                    RPC_TASK_ASYNC);
                if (!IS_ERR(task))
                        rpc_put_task(task);
 
@@ -1349,7 +1352,6 @@ gss_destroy_nullcred(struct rpc_cred *cred)
 static void
 gss_destroy_cred(struct rpc_cred *cred)
 {
-
        if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
                gss_send_destroy_context(cred);
        gss_destroy_nullcred(cred);
@@ -1613,6 +1615,7 @@ static int gss_renew_cred(struct rpc_task *task)
        new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
        if (IS_ERR(new))
                return PTR_ERR(new);
+
        task->tk_rqstp->rq_cred = new;
        put_rpccred(oldcred);
        return 0;
@@ -1709,7 +1712,8 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
 
        /* We leave it to unwrap to calculate au_rslack. For now we just
         * calculate the length of the verifier: */
-       cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
+       if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
+               cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
        status = 0;
 out:
        gss_put_ctx(ctx);
@@ -1927,13 +1931,30 @@ out:
        return status;
 }
 
-static int
-gss_unwrap_resp_auth(struct rpc_cred *cred)
+/**
+ * gss_update_rslack - Possibly update RPC receive buffer size estimates
+ * @task: rpc_task for incoming RPC Reply being unwrapped
+ * @cred: controlling rpc_cred for @task
+ * @before: XDR words needed before each RPC Reply message
+ * @after: XDR words needed following each RPC Reply message
+ *
+ */
+static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
+                             unsigned int before, unsigned int after)
 {
        struct rpc_auth *auth = cred->cr_auth;
 
-       auth->au_rslack = auth->au_verfsize;
-       auth->au_ralign = auth->au_verfsize;
+       if (test_and_clear_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags)) {
+               auth->au_ralign = auth->au_verfsize + before;
+               auth->au_rslack = auth->au_verfsize + after;
+               trace_rpcgss_update_slack(task, auth);
+       }
+}
+
+static int
+gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
+{
+       gss_update_rslack(task, cred, 0, 0);
        return 0;
 }
 
@@ -1956,7 +1977,6 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
                      struct xdr_stream *xdr)
 {
        struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
-       struct rpc_auth *auth = cred->cr_auth;
        u32 len, offset, seqno, maj_stat;
        struct xdr_netobj mic;
        int ret;
@@ -2005,8 +2025,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
        if (maj_stat != GSS_S_COMPLETE)
                goto bad_mic;
 
-       auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
-       auth->au_ralign = auth->au_verfsize + 2;
+       gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
        ret = 0;
 
 out:
@@ -2031,7 +2050,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 {
        struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
        struct kvec *head = rqstp->rq_rcv_buf.head;
-       struct rpc_auth *auth = cred->cr_auth;
        u32 offset, opaque_len, maj_stat;
        __be32 *p;
 
@@ -2058,8 +2076,8 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
         */
        xdr_init_decode(xdr, rcv_buf, p, rqstp);
 
-       auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
-       auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
+       gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
+                         2 + ctx->gc_gss_ctx->slack);
 
        return 0;
 unwrap_failed:
@@ -2130,7 +2148,7 @@ gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
                goto out_decode;
        switch (gss_cred->gc_service) {
        case RPC_GSS_SVC_NONE:
-               status = gss_unwrap_resp_auth(cred);
+               status = gss_unwrap_resp_auth(task, cred);
                break;
        case RPC_GSS_SVC_INTEGRITY:
                status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
index 69316ab..fae632d 100644 (file)
@@ -37,6 +37,8 @@ gss_mech_free(struct gss_api_mech *gm)
 
        for (i = 0; i < gm->gm_pf_num; i++) {
                pf = &gm->gm_pfs[i];
+               if (pf->domain)
+                       auth_domain_put(pf->domain);
                kfree(pf->auth_domain_name);
                pf->auth_domain_name = NULL;
        }
@@ -59,6 +61,7 @@ make_auth_domain_name(char *name)
 static int
 gss_mech_svc_setup(struct gss_api_mech *gm)
 {
+       struct auth_domain *dom;
        struct pf_desc *pf;
        int i, status;
 
@@ -68,10 +71,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm)
                status = -ENOMEM;
                if (pf->auth_domain_name == NULL)
                        goto out;
-               status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
-                                                       pf->auth_domain_name);
-               if (status)
+               dom = svcauth_gss_register_pseudoflavor(
+                       pf->pseudoflavor, pf->auth_domain_name);
+               if (IS_ERR(dom)) {
+                       status = PTR_ERR(dom);
                        goto out;
+               }
+               pf->domain = dom;
        }
        return 0;
 out:
index 0349f45..af9c7f4 100644 (file)
@@ -223,7 +223,7 @@ static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
 
 static char *gssp_stringify(struct xdr_netobj *netobj)
 {
-       return kstrndup(netobj->data, netobj->len, GFP_KERNEL);
+       return kmemdup_nul(netobj->data, netobj->len, GFP_KERNEL);
 }
 
 static void gssp_hostbased_service(char **principal)
index 50d93c4..46027d0 100644 (file)
@@ -809,7 +809,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
 
 EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
 
-int
+struct auth_domain *
 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
 {
        struct gss_domain       *new;
@@ -826,21 +826,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
        new->h.flavour = &svcauthops_gss;
        new->pseudoflavor = pseudoflavor;
 
-       stat = 0;
        test = auth_domain_lookup(name, &new->h);
-       if (test != &new->h) { /* Duplicate registration */
+       if (test != &new->h) {
+               pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
+                       name);
+               stat = -EADDRINUSE;
                auth_domain_put(test);
-               kfree(new->h.name);
-               goto out_free_dom;
+               goto out_free_name;
        }
-       return 0;
+       return test;
 
+out_free_name:
+       kfree(new->h.name);
 out_free_dom:
        kfree(new);
 out:
-       return stat;
+       return ERR_PTR(stat);
 }
-
 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
 
 static inline int
index 5576f1e..49fa583 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/gss_err.h>
+#include <linux/sunrpc/auth_gss.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/rpcgss.h>
index 61b21da..a91d1cd 100644 (file)
@@ -370,10 +370,6 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
        const char *nodename = args->nodename;
        int err;
 
-       /* sanity check the name before trying to print it */
-       dprintk("RPC:       creating %s client for %s (xprt %p)\n",
-                       program->name, args->servername, xprt);
-
        err = rpciod_up();
        if (err)
                goto out_no_rpciod;
@@ -436,6 +432,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
                goto out_no_path;
        if (parent)
                atomic_inc(&parent->cl_count);
+
+       trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
        return clnt;
 
 out_no_path:
@@ -450,6 +448,7 @@ out_err:
 out_no_rpciod:
        xprt_switch_put(xps);
        xprt_put(xprt);
+       trace_rpc_clnt_new_err(program->name, args->servername, err);
        return ERR_PTR(err);
 }
 
@@ -634,10 +633,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
        args->nodename = clnt->cl_nodename;
 
        new = rpc_new_client(args, xps, xprt, clnt);
-       if (IS_ERR(new)) {
-               err = PTR_ERR(new);
-               goto out_err;
-       }
+       if (IS_ERR(new))
+               return new;
 
        /* Turn off autobind on clones */
        new->cl_autobind = 0;
@@ -650,7 +647,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
        return new;
 
 out_err:
-       dprintk("RPC:       %s: returned error %d\n", __func__, err);
+       trace_rpc_clnt_clone_err(clnt, err);
        return ERR_PTR(err);
 }
 
@@ -723,11 +720,8 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
        int err;
 
        xprt = xprt_create_transport(args);
-       if (IS_ERR(xprt)) {
-               dprintk("RPC:       failed to create new xprt for clnt %p\n",
-                       clnt);
+       if (IS_ERR(xprt))
                return PTR_ERR(xprt);
-       }
 
        xps = xprt_switch_alloc(xprt, GFP_KERNEL);
        if (xps == NULL) {
@@ -767,7 +761,7 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
                rpc_release_client(parent);
        xprt_switch_put(oldxps);
        xprt_put(old);
-       dprintk("RPC:       replaced xprt for clnt %p\n", clnt);
+       trace_rpc_clnt_replace_xprt(clnt);
        return 0;
 
 out_revert:
@@ -777,7 +771,7 @@ out_revert:
        rpc_client_register(clnt, pseudoflavor, NULL);
        xprt_switch_put(xps);
        xprt_put(xprt);
-       dprintk("RPC:       failed to switch xprt for clnt %p\n", clnt);
+       trace_rpc_clnt_replace_xprt_err(clnt);
        return err;
 }
 EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
@@ -844,10 +838,11 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 
        if (list_empty(&clnt->cl_tasks))
                return;
-       dprintk("RPC:       killing all tasks for client %p\n", clnt);
+
        /*
         * Spin lock all_tasks to prevent changes...
         */
+       trace_rpc_clnt_killall(clnt);
        spin_lock(&clnt->cl_lock);
        list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
                rpc_signal_task(rovr);
@@ -863,9 +858,7 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
 {
        might_sleep();
 
-       dprintk_rcu("RPC:       shutting down %s client for %s\n",
-                       clnt->cl_program->name,
-                       rcu_dereference(clnt->cl_xprt)->servername);
+       trace_rpc_clnt_shutdown(clnt);
 
        while (!list_empty(&clnt->cl_tasks)) {
                rpc_killall_tasks(clnt);
@@ -884,6 +877,8 @@ static void rpc_free_client_work(struct work_struct *work)
 {
        struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
 
+       trace_rpc_clnt_free(clnt);
+
        /* These might block on processes that might allocate memory,
         * so they cannot be called in rpciod, so they are handled separately
         * here.
@@ -901,9 +896,7 @@ rpc_free_client(struct rpc_clnt *clnt)
 {
        struct rpc_clnt *parent = NULL;
 
-       dprintk_rcu("RPC:       destroying %s client for %s\n",
-                       clnt->cl_program->name,
-                       rcu_dereference(clnt->cl_xprt)->servername);
+       trace_rpc_clnt_release(clnt);
        if (clnt->cl_parent != clnt)
                parent = clnt->cl_parent;
        rpc_unregister_client(clnt);
@@ -945,8 +938,6 @@ rpc_free_auth(struct rpc_clnt *clnt)
 void
 rpc_release_client(struct rpc_clnt *clnt)
 {
-       dprintk("RPC:       rpc_release_client(%p)\n", clnt);
-
        do {
                if (list_empty(&clnt->cl_tasks))
                        wake_up(&destroy_wait);
@@ -1270,7 +1261,7 @@ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
        hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
 
        xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
-       trace_rpc_reply_pages(req);
+       trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
 }
 EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
 
@@ -1624,6 +1615,7 @@ const char
 static void
 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
 {
+       trace_rpc_call_rpcerror(task, tk_status, rpc_status);
        task->tk_rpc_status = rpc_status;
        rpc_exit(task, tk_status);
 }
@@ -2531,7 +2523,7 @@ call_decode(struct rpc_task *task)
                goto out;
 
        req->rq_rcv_buf.len = req->rq_private_buf.len;
-       trace_xprt_recvfrom(&req->rq_rcv_buf);
+       trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
 
        /* Check that the softirq receive buffer is valid */
        WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
@@ -2760,7 +2752,8 @@ struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
                .rpc_op_cred = cred,
                .callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
                .callback_data = data,
-               .flags = flags | RPC_TASK_NULLCREDS,
+               .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
+                        RPC_TASK_NULLCREDS,
        };
 
        return rpc_run_task(&task_setup_data);
@@ -2823,8 +2816,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
                goto success;
        }
 
-       task = rpc_call_null_helper(clnt, xprt, NULL,
-                       RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
+       task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
                        &rpc_cb_add_xprt_call_ops, data);
 
        rpc_put_task(task);
@@ -2867,9 +2859,7 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
                goto out_err;
 
        /* Test the connection */
-       task = rpc_call_null_helper(clnt, xprt, NULL,
-                                   RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
-                                   NULL, NULL);
+       task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
        if (IS_ERR(task)) {
                status = PTR_ERR(task);
                goto out_err;
index 4a020b6..c27123e 100644 (file)
@@ -795,12 +795,6 @@ void rpcb_getport_async(struct rpc_task *task)
 
        child = rpcb_call_async(rpcb_clnt, map, proc);
        rpc_release_client(rpcb_clnt);
-       if (IS_ERR(child)) {
-               /* rpcb_map_release() has freed the arguments */
-               dprintk("RPC: %5u %s: rpc_run_task failed\n",
-                       task->tk_pid, __func__);
-               return;
-       }
 
        xprt->stat.bind_count++;
        rpc_put_task(child);
index 47a7565..f6fe2e6 100644 (file)
@@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk)
 
 int rpc_clients_notifier_register(void);
 void rpc_clients_notifier_unregister(void);
+void auth_domain_cleanup(void);
 #endif /* _NET_SUNRPC_SUNRPC_H */
index f9edaa9..236fadc 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/xprtsock.h>
 
+#include "sunrpc.h"
 #include "netns.h"
 
 unsigned int sunrpc_net_id;
@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
        unregister_rpc_pipefs();
        rpc_destroy_mempool();
        unregister_pernet_subsys(&sunrpc_net_ops);
+       auth_domain_cleanup();
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        rpc_unregister_sysctl();
 #endif
index 9ed3126..c211b60 100644 (file)
@@ -88,15 +88,15 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp)
        switch (*ip)
        {
        case SVC_POOL_AUTO:
-               return strlcpy(buf, "auto", 20);
+               return strlcpy(buf, "auto\n", 20);
        case SVC_POOL_GLOBAL:
-               return strlcpy(buf, "global", 20);
+               return strlcpy(buf, "global\n", 20);
        case SVC_POOL_PERCPU:
-               return strlcpy(buf, "percpu", 20);
+               return strlcpy(buf, "percpu\n", 20);
        case SVC_POOL_PERNODE:
-               return strlcpy(buf, "pernode", 20);
+               return strlcpy(buf, "pernode\n", 20);
        default:
-               return sprintf(buf, "%d", *ip);
+               return sprintf(buf, "%d\n", *ip);
        }
 }
 
@@ -991,6 +991,7 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
        }
 
+       trace_svc_register(progname, version, protocol, port, family, error);
        return error;
 }
 
@@ -1000,11 +1001,6 @@ int svc_rpcbind_set_version(struct net *net,
                            unsigned short proto,
                            unsigned short port)
 {
-       dprintk("svc: svc_register(%sv%d, %s, %u, %u)\n",
-               progp->pg_name, version,
-               proto == IPPROTO_UDP?  "udp" : "tcp",
-               port, family);
-
        return __svc_register(net, progp->pg_name, progp->pg_prog,
                                version, family, proto, port);
 
@@ -1024,11 +1020,8 @@ int svc_generic_rpcbind_set(struct net *net,
                return 0;
 
        if (vers->vs_hidden) {
-               dprintk("svc: svc_register(%sv%d, %s, %u, %u)"
-                       " (but not telling portmap)\n",
-                       progp->pg_name, version,
-                       proto == IPPROTO_UDP?  "udp" : "tcp",
-                       port, family);
+               trace_svc_noregister(progp->pg_name, version, proto,
+                                    port, family, 0);
                return 0;
        }
 
@@ -1106,8 +1099,7 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(net, program, version, 0, 0);
 
-       dprintk("svc: %s(%sv%u), error %d\n",
-                       __func__, progname, version, error);
+       trace_svc_unregister(progname, version, error);
 }
 
 /*
@@ -1132,9 +1124,6 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
                                continue;
                        if (progp->pg_vers[i]->vs_hidden)
                                continue;
-
-                       dprintk("svc: attempting to unregister %sv%u\n",
-                               progp->pg_name, i);
                        __svc_unregister(net, progp->pg_prog, i, progp->pg_name);
                }
        }
index 2284ff0..43cf8db 100644 (file)
@@ -153,6 +153,7 @@ static void svc_xprt_free(struct kref *kref)
                xprt_put(xprt->xpt_bc_xprt);
        if (xprt->xpt_bc_xps)
                xprt_switch_put(xprt->xpt_bc_xps);
+       trace_svc_xprt_free(xprt);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
 }
@@ -206,6 +207,7 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                .sin6_port              = htons(port),
        };
 #endif
+       struct svc_xprt *xprt;
        struct sockaddr *sap;
        size_t len;
 
@@ -224,7 +226,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                return ERR_PTR(-EAFNOSUPPORT);
        }
 
-       return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+       xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+       if (IS_ERR(xprt))
+               trace_svc_xprt_create_err(serv->sv_program->pg_name,
+                                         xcl->xcl_name, sap, xprt);
+       return xprt;
 }
 
 /*
@@ -304,15 +310,11 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 {
        int err;
 
-       dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
        err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
        if (err == -EPROTONOSUPPORT) {
                request_module("svc%s", xprt_name);
                err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
        }
-       if (err < 0)
-               dprintk("svc: transport %s not found, err %d\n",
-                       xprt_name, -err);
        return err;
 }
 EXPORT_SYMBOL_GPL(svc_create_xprt);
@@ -780,7 +782,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
        int len = 0;
 
        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
-               dprintk("svc_recv: found XPT_CLOSE\n");
                if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
                        xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
                svc_delete_xprt(xprt);
@@ -799,6 +800,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                if (newxpt) {
                        newxpt->xpt_cred = get_cred(xprt->xpt_cred);
                        svc_add_new_temp_xprt(serv, newxpt);
+                       trace_svc_xprt_accept(newxpt, serv->sv_name);
                } else
                        module_put(xprt->xpt_class->xcl_owner);
        } else if (svc_xprt_reserve_slot(rqstp, xprt)) {
@@ -812,7 +814,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                else
                        len = xprt->xpt_ops->xpo_recvfrom(rqstp);
                if (len > 0)
-                       trace_svc_recvfrom(&rqstp->rq_arg);
+                       trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
                rqstp->rq_stime = ktime_get();
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
@@ -835,14 +837,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        struct svc_serv         *serv = rqstp->rq_server;
        int                     len, err;
 
-       dprintk("svc: server %p waiting for data (to = %ld)\n",
-               rqstp, timeout);
-
-       if (rqstp->rq_xprt)
-               printk(KERN_ERR
-                       "svc_recv: service %p, transport not NULL!\n",
-                        rqstp);
-
        err = svc_alloc_arg(rqstp);
        if (err)
                goto out;
@@ -890,7 +884,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
 void svc_drop(struct svc_rqst *rqstp)
 {
        trace_svc_drop(rqstp);
-       dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
        svc_xprt_release(rqstp);
 }
 EXPORT_SYMBOL_GPL(svc_drop);
@@ -913,17 +906,11 @@ int svc_send(struct svc_rqst *rqstp)
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;
-       trace_svc_sendto(xb);
-
-       /* Grab mutex to serialize outgoing data. */
-       mutex_lock(&xprt->xpt_mutex);
+       trace_svc_xdr_sendto(rqstp, xb);
        trace_svc_stats_latency(rqstp);
-       if (test_bit(XPT_DEAD, &xprt->xpt_flags)
-                       || test_bit(XPT_CLOSE, &xprt->xpt_flags))
-               len = -ENOTCONN;
-       else
-               len = xprt->xpt_ops->xpo_sendto(rqstp);
-       mutex_unlock(&xprt->xpt_mutex);
+
+       len = xprt->xpt_ops->xpo_sendto(rqstp);
+
        trace_svc_send(rqstp, len);
        svc_xprt_release(rqstp);
 
@@ -1031,11 +1018,10 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
        struct svc_serv *serv = xprt->xpt_server;
        struct svc_deferred_req *dr;
 
-       /* Only do this once */
        if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
-               BUG();
+               return;
 
-       dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+       trace_svc_xprt_detach(xprt);
        xprt->xpt_ops->xpo_detach(xprt);
        if (xprt->xpt_bc_xprt)
                xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);
@@ -1056,6 +1042,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 
 void svc_close_xprt(struct svc_xprt *xprt)
 {
+       trace_svc_xprt_close(xprt);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
                /* someone else will have to effect the close */
@@ -1158,16 +1145,15 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
        set_bit(XPT_DEFERRED, &xprt->xpt_flags);
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
-               dprintk("revisit canceled\n");
+               trace_svc_defer_drop(dr);
                svc_xprt_put(xprt);
-               trace_svc_drop_deferred(dr);
                kfree(dr);
                return;
        }
-       dprintk("revisit queued\n");
        dr->xprt = NULL;
        list_add(&dr->handle.recent, &xprt->xpt_deferred);
        spin_unlock(&xprt->xpt_lock);
+       trace_svc_defer_queue(dr);
        svc_xprt_enqueue(xprt);
        svc_xprt_put(xprt);
 }
@@ -1213,22 +1199,24 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
+       trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
        set_bit(RQ_DROPME, &rqstp->rq_flags);
 
        dr->handle.revisit = svc_revisit;
-       trace_svc_defer(rqstp);
        return &dr->handle;
 }
 
 /*
  * recv data from a deferred request into an active one
  */
-static int svc_deferred_recv(struct svc_rqst *rqstp)
+static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
 {
        struct svc_deferred_req *dr = rqstp->rq_deferred;
 
+       trace_svc_defer_recv(dr);
+
        /* setup iov_base past transport header */
        rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
        /* The iov_len does not include the transport header bytes */
@@ -1259,7 +1247,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
-               trace_svc_revisit_deferred(dr);
        } else
                clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
        spin_unlock(&xprt->xpt_lock);
index 552617e..998b196 100644 (file)
@@ -21,6 +21,8 @@
 
 #include <trace/events/sunrpc.h>
 
+#include "sunrpc.h"
+
 #define RPCDBG_FACILITY        RPCDBG_AUTH
 
 
@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
        return NULL;
 }
 EXPORT_SYMBOL_GPL(auth_domain_find);
+
+/**
+ * auth_domain_cleanup - check that the auth_domain table is empty
+ *
+ * On module unload the auth_domain_table must be empty.  To make it
+ * easier to catch bugs which don't clean up domains properly, we
+ * warn if anything remains in the table at cleanup time.
+ *
+ * Note that we cannot proactively remove the domains at this stage.
+ * The ->release() function might be in a module that has already been
+ * unloaded.
+ */
+
+void auth_domain_cleanup(void)
+{
+       int h;
+       struct auth_domain *hp;
+
+       for (h = 0; h < DN_HASHMAX; h++)
+               hlist_for_each_entry(hp, &auth_domain_table[h], hash)
+                       pr_warn("svc: domain %s still present at module unload.\n",
+                               hp->name);
+}
index 6c8f802..97c0bdd 100644 (file)
@@ -332,15 +332,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
        return 0;
 }
 
-static inline int ip_map_update(struct net *net, struct ip_map *ipm,
-               struct unix_domain *udom, time64_t expiry)
-{
-       struct sunrpc_net *sn;
-
-       sn = net_generic(net, sunrpc_net_id);
-       return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
-}
-
 void svcauth_unix_purge(struct net *net)
 {
        struct sunrpc_net *sn;
index e7a0037..5c4ec93 100644 (file)
@@ -45,7 +45,6 @@
 #include <net/tcp_states.h>
 #include <linux/uaccess.h>
 #include <asm/ioctls.h>
-#include <trace/events/skb.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/clnt.h>
@@ -55,6 +54,8 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/xprt.h>
 
+#include <trace/events/sunrpc.h>
+
 #include "socklib.h"
 #include "sunrpc.h"
 
@@ -108,31 +109,35 @@ static void svc_reclassify_socket(struct socket *sock)
 }
 #endif
 
-/*
- * Release an skbuff after use
+/**
+ * svc_tcp_release_rqst - Release transport-related resources
+ * @rqstp: request structure with resources to be released
+ *
  */
-static void svc_release_skb(struct svc_rqst *rqstp)
+static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
 {
        struct sk_buff *skb = rqstp->rq_xprt_ctxt;
 
        if (skb) {
                struct svc_sock *svsk =
                        container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
-               rqstp->rq_xprt_ctxt = NULL;
 
-               dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
+               rqstp->rq_xprt_ctxt = NULL;
                skb_free_datagram_locked(svsk->sk_sk, skb);
        }
 }
 
-static void svc_release_udp_skb(struct svc_rqst *rqstp)
+/**
+ * svc_udp_release_rqst - Release transport-related resources
+ * @rqstp: request structure with resources to be released
+ *
+ */
+static void svc_udp_release_rqst(struct svc_rqst *rqstp)
 {
        struct sk_buff *skb = rqstp->rq_xprt_ctxt;
 
        if (skb) {
                rqstp->rq_xprt_ctxt = NULL;
-
-               dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                consume_skb(skb);
        }
 }
@@ -218,34 +223,68 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
        return len;
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
+{
+       struct bvec_iter bi = {
+               .bi_size        = size,
+       };
+       struct bio_vec bv;
+
+       bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
+       for_each_bvec(bv, bvec, bi, bi)
+               flush_dcache_page(bv.bv_page);
+}
+#else
+static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size,
+                                 size_t seek)
+{
+}
+#endif
+
 /*
- * Generic recvfrom routine.
+ * Read from @rqstp's transport socket. The incoming message fills whole
+ * pages in @rqstp's rq_pages array until the last page of the message
+ * has been received into a partial page.
  */
-static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
-                           unsigned int nr, size_t buflen, unsigned int base)
+static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
+                               size_t seek)
 {
        struct svc_sock *svsk =
                container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+       struct bio_vec *bvec = rqstp->rq_bvec;
        struct msghdr msg = { NULL };
+       unsigned int i;
        ssize_t len;
+       size_t t;
 
        rqstp->rq_xprt_hlen = 0;
 
        clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-       iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen);
-       if (base != 0) {
-               iov_iter_advance(&msg.msg_iter, base);
-               buflen -= base;
+
+       for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE) {
+               bvec[i].bv_page = rqstp->rq_pages[i];
+               bvec[i].bv_len = PAGE_SIZE;
+               bvec[i].bv_offset = 0;
+       }
+       rqstp->rq_respages = &rqstp->rq_pages[i];
+       rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+       iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen);
+       if (seek) {
+               iov_iter_advance(&msg.msg_iter, seek);
+               buflen -= seek;
        }
        len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
+       if (len > 0)
+               svc_flush_bvec(bvec, len, seek);
+
        /* If we read a full record, then assume there may be more
         * data to read (stream based sockets only!)
         */
        if (len == buflen)
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
-       dprintk("svc: socket %p recvfrom(%p, %zu) = %zd\n",
-               svsk, iov[0].iov_base, iov[0].iov_len, len);
        return len;
 }
 
@@ -282,13 +321,10 @@ static void svc_data_ready(struct sock *sk)
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 
        if (svsk) {
-               dprintk("svc: socket %p(inet %p), busy=%d\n",
-                       svsk, sk,
-                       test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
-
                /* Refer to svc_setup_socket() for details. */
                rmb();
                svsk->sk_odata(sk);
+               trace_svcsock_data_ready(&svsk->sk_xprt, 0);
                if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
                        svc_xprt_enqueue(&svsk->sk_xprt);
        }
@@ -302,11 +338,9 @@ static void svc_write_space(struct sock *sk)
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
 
        if (svsk) {
-               dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
-                       svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
-
                /* Refer to svc_setup_socket() for details. */
                rmb();
+               trace_svcsock_write_space(&svsk->sk_xprt, 0);
                svsk->sk_owspace(sk);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
@@ -383,8 +417,15 @@ static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
        return 0;
 }
 
-/*
- * Receive a datagram from a UDP socket.
+/**
+ * svc_udp_recvfrom - Receive a datagram from a UDP socket.
+ * @rqstp: request structure into which to receive an RPC Call
+ *
+ * Called in a loop when XPT_DATA has been set.
+ *
+ * Returns:
+ *   On success, the number of bytes in a received RPC Call, or
+ *   %0 if a complete RPC Call message was not ready to return
  */
 static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 {
@@ -418,20 +459,14 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
            svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
 
        clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-       skb = NULL;
        err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
                             0, 0, MSG_PEEK | MSG_DONTWAIT);
-       if (err >= 0)
-               skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
-
-       if (skb == NULL) {
-               if (err != -EAGAIN) {
-                       /* possibly an icmp error */
-                       dprintk("svc: recvfrom returned error %d\n", -err);
-                       set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-               }
-               return 0;
-       }
+       if (err < 0)
+               goto out_recv_err;
+       skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
+       if (!skb)
+               goto out_recv_err;
+
        len = svc_addr_len(svc_addr(rqstp));
        rqstp->rq_addrlen = len;
        if (skb->tstamp == 0) {
@@ -442,26 +477,21 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
        sock_write_timestamp(svsk->sk_sk, skb->tstamp);
        set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
-       len  = skb->len;
+       len = skb->len;
        rqstp->rq_arg.len = len;
+       trace_svcsock_udp_recv(&svsk->sk_xprt, len);
 
        rqstp->rq_prot = IPPROTO_UDP;
 
-       if (!svc_udp_get_dest_address(rqstp, cmh)) {
-               net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
-                                    cmh->cmsg_level, cmh->cmsg_type);
-               goto out_free;
-       }
+       if (!svc_udp_get_dest_address(rqstp, cmh))
+               goto out_cmsg_err;
        rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
 
        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                local_bh_disable();
-               if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
-                       local_bh_enable();
-                       /* checksum error */
-                       goto out_free;
-               }
+               if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
+                       goto out_bh_enable;
                local_bh_enable();
                consume_skb(skb);
        } else {
@@ -489,6 +519,20 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
                serv->sv_stats->netudpcnt++;
 
        return len;
+
+out_recv_err:
+       if (err != -EAGAIN) {
+               /* possibly an icmp error */
+               set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+       }
+       trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
+       return 0;
+out_cmsg_err:
+       net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
+                            cmh->cmsg_level, cmh->cmsg_type);
+       goto out_free;
+out_bh_enable:
+       local_bh_enable();
 out_free:
        kfree_skb(skb);
        return 0;
@@ -498,6 +542,9 @@ out_free:
  * svc_udp_sendto - Send out a reply on a UDP socket
  * @rqstp: completed svc_rqst
  *
+ * xpt_mutex ensures @rqstp's whole message is written to the socket
+ * without interruption.
+ *
  * Returns the number of bytes sent, or a negative errno.
  */
 static int svc_udp_sendto(struct svc_rqst *rqstp)
@@ -519,10 +566,15 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        unsigned int uninitialized_var(sent);
        int err;
 
-       svc_release_udp_skb(rqstp);
+       svc_udp_release_rqst(rqstp);
 
        svc_set_cmsg_data(rqstp, cmh);
 
+       mutex_lock(&xprt->xpt_mutex);
+
+       if (svc_xprt_is_dead(xprt))
+               goto out_notconn;
+
        err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
        xdr_free_bvec(xdr);
        if (err == -ECONNREFUSED) {
@@ -530,9 +582,16 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
                err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
                xdr_free_bvec(xdr);
        }
+       trace_svcsock_udp_send(xprt, err);
+
+       mutex_unlock(&xprt->xpt_mutex);
        if (err < 0)
                return err;
        return sent;
+
+out_notconn:
+       mutex_unlock(&xprt->xpt_mutex);
+       return -ENOTCONN;
 }
 
 static int svc_udp_has_wspace(struct svc_xprt *xprt)
@@ -576,7 +635,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
        .xpo_recvfrom = svc_udp_recvfrom,
        .xpo_sendto = svc_udp_sendto,
        .xpo_read_payload = svc_sock_read_payload,
-       .xpo_release_rqst = svc_release_udp_skb,
+       .xpo_release_rqst = svc_udp_release_rqst,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_udp_has_wspace,
@@ -632,9 +691,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 
-       dprintk("svc: socket %p TCP (listen) state change %d\n",
-               sk, sk->sk_state);
-
        if (svsk) {
                /* Refer to svc_setup_socket() for details. */
                rmb();
@@ -655,8 +711,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
                if (svsk) {
                        set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
                        svc_xprt_enqueue(&svsk->sk_xprt);
-               } else
-                       printk("svc: socket %p: no user data\n", sk);
+               }
        }
 }
 
@@ -667,15 +722,11 @@ static void svc_tcp_state_change(struct sock *sk)
 {
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 
-       dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
-               sk, sk->sk_state, sk->sk_user_data);
-
-       if (!svsk)
-               printk("svc: socket %p: no user data\n", sk);
-       else {
+       if (svsk) {
                /* Refer to svc_setup_socket() for details. */
                rmb();
                svsk->sk_ostate(sk);
+               trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
                if (sk->sk_state != TCP_ESTABLISHED) {
                        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
                        svc_xprt_enqueue(&svsk->sk_xprt);
@@ -696,9 +747,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
        struct socket   *newsock;
        struct svc_sock *newsvsk;
        int             err, slen;
-       RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
 
-       dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return NULL;
 
@@ -711,30 +760,18 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
                else if (err != -EAGAIN)
                        net_warn_ratelimited("%s: accept failed (err %d)!\n",
                                             serv->sv_name, -err);
+               trace_svcsock_accept_err(xprt, serv->sv_name, err);
                return NULL;
        }
        set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
        err = kernel_getpeername(newsock, sin);
        if (err < 0) {
-               net_warn_ratelimited("%s: peername failed (err %d)!\n",
-                                    serv->sv_name, -err);
+               trace_svcsock_getpeername_err(xprt, serv->sv_name, err);
                goto failed;            /* aborted connection or whatever */
        }
        slen = err;
 
-       /* Ideally, we would want to reject connections from unauthorized
-        * hosts here, but when we get encryption, the IP of the host won't
-        * tell us anything.  For now just warn about unpriv connections.
-        */
-       if (!svc_port_is_privileged(sin)) {
-               dprintk("%s: connect from unprivileged port: %s\n",
-                       serv->sv_name,
-                       __svc_print_addr(sin, buf, sizeof(buf)));
-       }
-       dprintk("%s: connect from %s\n", serv->sv_name,
-               __svc_print_addr(sin, buf, sizeof(buf)));
-
        /* Reset the inherited callbacks before calling svc_setup_socket */
        newsock->sk->sk_state_change = svsk->sk_ostate;
        newsock->sk->sk_data_ready = svsk->sk_odata;
@@ -752,10 +789,8 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
        svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
        err = kernel_getsockname(newsock, sin);
        slen = err;
-       if (unlikely(err < 0)) {
-               dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
+       if (unlikely(err < 0))
                slen = offsetof(struct sockaddr, sa_data);
-       }
        svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);
 
        if (sock_is_loopback(newsock->sk))
@@ -772,13 +807,14 @@ failed:
        return NULL;
 }
 
-static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
+                                   struct svc_rqst *rqstp)
 {
-       unsigned int i, len, npages;
+       size_t len = svsk->sk_datalen;
+       unsigned int i, npages;
 
-       if (svsk->sk_datalen == 0)
+       if (!len)
                return 0;
-       len = svsk->sk_datalen;
        npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                if (rqstp->rq_pages[i] != NULL)
@@ -827,47 +863,45 @@ out:
 }
 
 /*
- * Receive fragment record header.
- * If we haven't gotten the record length yet, get the next four bytes.
+ * Receive fragment record header into sk_marker.
  */
-static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
+static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
+                                  struct svc_rqst *rqstp)
 {
-       struct svc_serv *serv = svsk->sk_xprt.xpt_server;
-       unsigned int want;
-       int len;
+       ssize_t want, len;
 
+       /* If we haven't gotten the record length yet,
+        * get the next four bytes.
+        */
        if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
+               struct msghdr   msg = { NULL };
                struct kvec     iov;
 
                want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
-               iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
+               iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
                iov.iov_len  = want;
-               len = svc_recvfrom(rqstp, &iov, 1, want, 0);
+               iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want);
+               len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
                if (len < 0)
-                       goto error;
+                       return len;
                svsk->sk_tcplen += len;
-
                if (len < want) {
-                       dprintk("svc: short recvfrom while reading record "
-                               "length (%d of %d)\n", len, want);
-                       return -EAGAIN;
+                       /* call again to read the remaining bytes */
+                       goto err_short;
                }
-
-               dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
+               trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
                if (svc_sock_reclen(svsk) + svsk->sk_datalen >
-                                                       serv->sv_max_mesg) {
-                       net_notice_ratelimited("RPC: fragment too large: %d\n",
-                                       svc_sock_reclen(svsk));
-                       goto err_delete;
-               }
+                   svsk->sk_xprt.xpt_server->sv_max_mesg)
+                       goto err_too_large;
        }
-
        return svc_sock_reclen(svsk);
-error:
-       dprintk("RPC: TCP recv_record got %d\n", len);
-       return len;
-err_delete:
+
+err_too_large:
+       net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
+                              __func__, svsk->sk_xprt.xpt_server->sv_name,
+                              svc_sock_reclen(svsk));
        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+err_short:
        return -EAGAIN;
 }
 
@@ -916,87 +950,58 @@ unlock_eagain:
        return -EAGAIN;
 }
 
-static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
-{
-       int i = 0;
-       int t = 0;
-
-       while (t < len) {
-               vec[i].iov_base = page_address(pages[i]);
-               vec[i].iov_len = PAGE_SIZE;
-               i++;
-               t += PAGE_SIZE;
-       }
-       return i;
-}
-
 static void svc_tcp_fragment_received(struct svc_sock *svsk)
 {
        /* If we have more data, signal svc_xprt_enqueue() to try again */
-       dprintk("svc: TCP %s record (%d bytes)\n",
-               svc_sock_final_rec(svsk) ? "final" : "nonfinal",
-               svc_sock_reclen(svsk));
        svsk->sk_tcplen = 0;
-       svsk->sk_reclen = 0;
+       svsk->sk_marker = xdr_zero;
 }
 
-/*
- * Receive data from a TCP socket.
+/**
+ * svc_tcp_recvfrom - Receive data from a TCP socket
+ * @rqstp: request structure into which to receive an RPC Call
+ *
+ * Called in a loop when XPT_DATA has been set.
+ *
+ * Read the 4-byte stream record marker, then use the record length
+ * in that marker to set up exactly the resources needed to receive
+ * the next RPC message into @rqstp.
+ *
+ * Returns:
+ *   On success, the number of bytes in a received RPC Call, or
+ *   %0 if a complete RPC Call message was not ready to return
+ *
+ * The zero return case handles partial receives and callback Replies.
+ * The state of a partial receive is preserved in the svc_sock for
+ * the next call to svc_tcp_recvfrom.
  */
 static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 {
        struct svc_sock *svsk =
                container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
        struct svc_serv *serv = svsk->sk_xprt.xpt_server;
-       int             len;
-       struct kvec *vec;
-       unsigned int want, base;
+       size_t want, base;
+       ssize_t len;
        __be32 *p;
        __be32 calldir;
-       int pnum;
-
-       dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
-               svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
-               test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
-               test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
-       len = svc_tcp_recv_record(svsk, rqstp);
+       clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+       len = svc_tcp_read_marker(svsk, rqstp);
        if (len < 0)
                goto error;
 
        base = svc_tcp_restore_pages(svsk, rqstp);
-       want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
-
-       vec = rqstp->rq_vec;
-
-       pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], base + want);
-
-       rqstp->rq_respages = &rqstp->rq_pages[pnum];
-       rqstp->rq_next_page = rqstp->rq_respages + 1;
-
-       /* Now receive data */
-       len = svc_recvfrom(rqstp, vec, pnum, base + want, base);
+       want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
+       len = svc_tcp_read_msg(rqstp, base + want, base);
        if (len >= 0) {
+               trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
                svsk->sk_tcplen += len;
                svsk->sk_datalen += len;
        }
-       if (len != want || !svc_sock_final_rec(svsk)) {
-               svc_tcp_save_pages(svsk, rqstp);
-               if (len < 0 && len != -EAGAIN)
-                       goto err_delete;
-               if (len == want)
-                       svc_tcp_fragment_received(svsk);
-               else
-                       dprintk("svc: incomplete TCP record (%d of %d)\n",
-                               (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
-                               svc_sock_reclen(svsk));
-               goto err_noclose;
-       }
-
-       if (svsk->sk_datalen < 8) {
-               svsk->sk_datalen = 0;
-               goto err_delete; /* client is nuts. */
-       }
+       if (len != want || !svc_sock_final_rec(svsk))
+               goto err_incomplete;
+       if (svsk->sk_datalen < 8)
+               goto err_nuts;
 
        rqstp->rq_arg.len = svsk->sk_datalen;
        rqstp->rq_arg.page_base = 0;
@@ -1031,14 +1036,26 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 
        return rqstp->rq_arg.len;
 
+err_incomplete:
+       svc_tcp_save_pages(svsk, rqstp);
+       if (len < 0 && len != -EAGAIN)
+               goto err_delete;
+       if (len == want)
+               svc_tcp_fragment_received(svsk);
+       else
+               trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
+                               svc_sock_reclen(svsk),
+                               svsk->sk_tcplen - sizeof(rpc_fraghdr));
+       goto err_noclose;
 error:
        if (len != -EAGAIN)
                goto err_delete;
-       dprintk("RPC: TCP recvfrom got EAGAIN\n");
+       trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
        return 0;
+err_nuts:
+       svsk->sk_datalen = 0;
 err_delete:
-       printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
-              svsk->sk_xprt.xpt_server->sv_name, -len);
+       trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 err_noclose:
        return 0;       /* record not complete */
@@ -1048,6 +1065,9 @@ err_noclose:
  * svc_tcp_sendto - Send out a reply on a TCP socket
  * @rqstp: completed svc_rqst
  *
+ * xpt_mutex ensures @rqstp's whole message is written to the socket
+ * without interruption.
+ *
  * Returns the number of bytes sent, or a negative errno.
  */
 static int svc_tcp_sendto(struct svc_rqst *rqstp)
@@ -1063,14 +1083,22 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        unsigned int uninitialized_var(sent);
        int err;
 
-       svc_release_skb(rqstp);
+       svc_tcp_release_rqst(rqstp);
 
+       mutex_lock(&xprt->xpt_mutex);
+       if (svc_xprt_is_dead(xprt))
+               goto out_notconn;
        err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
        xdr_free_bvec(xdr);
+       trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
                goto out_close;
+       mutex_unlock(&xprt->xpt_mutex);
        return sent;
 
+out_notconn:
+       mutex_unlock(&xprt->xpt_mutex);
+       return -ENOTCONN;
 out_close:
        pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                  xprt->xpt_server->sv_name,
@@ -1078,6 +1106,7 @@ out_close:
                  (err < 0) ? err : sent, xdr->len);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        svc_xprt_enqueue(xprt);
+       mutex_unlock(&xprt->xpt_mutex);
        return -EAGAIN;
 }
 
@@ -1094,7 +1123,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
        .xpo_recvfrom = svc_tcp_recvfrom,
        .xpo_sendto = svc_tcp_sendto,
        .xpo_read_payload = svc_sock_read_payload,
-       .xpo_release_rqst = svc_release_skb,
+       .xpo_release_rqst = svc_tcp_release_rqst,
        .xpo_detach = svc_tcp_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_tcp_has_wspace,
@@ -1132,18 +1161,16 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
        set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
        set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
        if (sk->sk_state == TCP_LISTEN) {
-               dprintk("setting up TCP socket for listening\n");
                strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
                set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
        } else {
-               dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_data_ready;
                sk->sk_write_space = svc_write_space;
 
-               svsk->sk_reclen = 0;
+               svsk->sk_marker = xdr_zero;
                svsk->sk_tcplen = 0;
                svsk->sk_datalen = 0;
                memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
@@ -1188,7 +1215,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        int             pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
        int             err = 0;
 
-       dprintk("svc: svc_setup_socket %p\n", sock);
        svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
        if (!svsk)
                return ERR_PTR(-ENOMEM);
@@ -1225,12 +1251,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        else
                svc_tcp_init(svsk, serv);
 
-       dprintk("svc: svc_setup_socket created %p (inet %p), "
-                       "listen %d close %d\n",
-                       svsk, svsk->sk_sk,
-                       test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags),
-                       test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
-
+       trace_svcsock_new_socket(sock);
        return svsk;
 }
 
@@ -1322,11 +1343,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
        struct sockaddr *newsin = (struct sockaddr *)&addr;
        int             newlen;
        int             family;
-       RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-
-       dprintk("svc: svc_create_socket(%s, %d, %s)\n",
-                       serv->sv_program->pg_name, protocol,
-                       __svc_print_addr(sin, buf, sizeof(buf)));
 
        if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
                printk(KERN_WARNING "svc: only UDP and TCP "
@@ -1383,7 +1399,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
        svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
        return (struct svc_xprt *)svsk;
 bummer:
-       dprintk("svc: svc_create_socket error = %d\n", -error);
        sock_release(sock);
        return ERR_PTR(error);
 }
@@ -1397,8 +1412,6 @@ static void svc_sock_detach(struct svc_xprt *xprt)
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
        struct sock *sk = svsk->sk_sk;
 
-       dprintk("svc: svc_sock_detach(%p)\n", svsk);
-
        /* put back the old socket callbacks */
        lock_sock(sk);
        sk->sk_state_change = svsk->sk_ostate;
@@ -1415,8 +1428,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
 {
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
 
-       dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);
-
        svc_sock_detach(xprt);
 
        if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
@@ -1431,7 +1442,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
 static void svc_sock_free(struct svc_xprt *xprt)
 {
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
-       dprintk("svc: svc_sock_free(%p)\n", svsk);
 
        if (svsk->sk_sock->file)
                sockfd_put(svsk->sk_sock);
index 493a30a..d5cc5db 100644 (file)
@@ -663,6 +663,7 @@ static void xprt_autoclose(struct work_struct *work)
                container_of(work, struct rpc_xprt, task_cleanup);
        unsigned int pflags = memalloc_nofs_save();
 
+       trace_xprt_disconnect_auto(xprt);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
@@ -677,7 +678,7 @@ static void xprt_autoclose(struct work_struct *work)
  */
 void xprt_disconnect_done(struct rpc_xprt *xprt)
 {
-       dprintk("RPC:       disconnected transport %p\n", xprt);
+       trace_xprt_disconnect_done(xprt);
        spin_lock(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_clear_write_space_locked(xprt);
@@ -694,6 +695,8 @@ EXPORT_SYMBOL_GPL(xprt_disconnect_done);
  */
 void xprt_force_disconnect(struct rpc_xprt *xprt)
 {
+       trace_xprt_disconnect_force(xprt);
+
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock(&xprt->transport_lock);
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
@@ -832,8 +835,10 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_lock_write(xprt, task))
                return;
 
-       if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
+       if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
+               trace_xprt_disconnect_cleanup(xprt);
                xprt->ops->close(xprt);
+       }
 
        if (!xprt_connected(xprt)) {
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
@@ -1460,7 +1465,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
         */
        req->rq_ntrans++;
 
-       trace_xprt_sendto(&req->rq_snd_buf);
+       trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
        connect_cookie = xprt->connect_cookie;
        status = xprt->ops->send_request(req);
        if (status != 0) {
@@ -1903,11 +1908,8 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
 
 found:
        xprt = t->setup(args);
-       if (IS_ERR(xprt)) {
-               dprintk("RPC:       xprt_create_transport: failed, %ld\n",
-                               -PTR_ERR(xprt));
+       if (IS_ERR(xprt))
                goto out;
-       }
        if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
                xprt->idle_timeout = 0;
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
@@ -1928,8 +1930,7 @@ found:
 
        rpc_xprt_debugfs_register(xprt);
 
-       dprintk("RPC:       created transport %p with %u slots\n", xprt,
-                       xprt->max_reqs);
+       trace_xprt_create(xprt);
 out:
        return xprt;
 }
@@ -1939,6 +1940,8 @@ static void xprt_destroy_cb(struct work_struct *work)
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);
 
+       trace_xprt_destroy(xprt);
+
        rpc_xprt_debugfs_unregister(xprt);
        rpc_destroy_wait_queue(&xprt->binding);
        rpc_destroy_wait_queue(&xprt->pending);
@@ -1963,8 +1966,6 @@ static void xprt_destroy_cb(struct work_struct *work)
  */
 static void xprt_destroy(struct rpc_xprt *xprt)
 {
-       dprintk("RPC:       destroying transport %p\n", xprt);
-
        /*
         * Exclude transport connect/disconnect handlers and autoclose
         */
index 3c627dc..2081c8f 100644 (file)
@@ -892,8 +892,8 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
         * or privacy, direct data placement of individual data items
         * is not allowed.
         */
-       ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
-                                               RPCAUTH_AUTH_DATATOUCH);
+       ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
+                               &rqst->rq_cred->cr_auth->au_flags);
 
        /*
         * Chunks needed for results?
index af7eb8d..1ee73f7 100644 (file)
 #include "xprt_rdma.h"
 #include <trace/events/rpcrdma.h>
 
-#define RPCDBG_FACILITY        RPCDBG_SVCXPRT
-
-#undef SVCRDMA_BACKCHANNEL_DEBUG
-
 /**
- * svc_rdma_handle_bc_reply - Process incoming backchannel reply
- * @xprt: controlling backchannel transport
- * @rdma_resp: pointer to incoming transport header
- * @rcvbuf: XDR buffer into which to decode the reply
+ * svc_rdma_handle_bc_reply - Process incoming backchannel Reply
+ * @rqstp: resources for handling the Reply
+ * @rctxt: Received message
  *
- * Returns:
- *     %0 if @rcvbuf is filled in, xprt_complete_rqst called,
- *     %-EAGAIN if server should call ->recvfrom again.
  */
-int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
-                            struct xdr_buf *rcvbuf)
+void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+                             struct svc_rdma_recv_ctxt *rctxt)
 {
+       struct svc_xprt *sxprt = rqstp->rq_xprt;
+       struct rpc_xprt *xprt = sxprt->xpt_bc_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+       struct xdr_buf *rcvbuf = &rqstp->rq_arg;
        struct kvec *dst, *src = &rcvbuf->head[0];
+       __be32 *rdma_resp = rctxt->rc_recv_buf;
        struct rpc_rqst *req;
        u32 credits;
-       size_t len;
-       __be32 xid;
-       __be32 *p;
-       int ret;
-
-       p = (__be32 *)src->iov_base;
-       len = src->iov_len;
-       xid = *rdma_resp;
-
-#ifdef SVCRDMA_BACKCHANNEL_DEBUG
-       pr_info("%s: xid=%08x, length=%zu\n",
-               __func__, be32_to_cpu(xid), len);
-       pr_info("%s: RPC/RDMA: %*ph\n",
-               __func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp);
-       pr_info("%s:      RPC: %*ph\n",
-               __func__, (int)len, p);
-#endif
-
-       ret = -EAGAIN;
-       if (src->iov_len < 24)
-               goto out_shortreply;
 
        spin_lock(&xprt->queue_lock);
-       req = xprt_lookup_rqst(xprt, xid);
+       req = xprt_lookup_rqst(xprt, *rdma_resp);
        if (!req)
-               goto out_notfound;
+               goto out_unlock;
 
        dst = &req->rq_private_buf.head[0];
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
-       if (dst->iov_len < len)
+       if (dst->iov_len < src->iov_len)
                goto out_unlock;
-       memcpy(dst->iov_base, p, len);
+       memcpy(dst->iov_base, src->iov_base, src->iov_len);
        xprt_pin_rqst(req);
        spin_unlock(&xprt->queue_lock);
 
@@ -71,31 +46,17 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
                credits = 1;    /* don't deadlock */
        else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
                credits = r_xprt->rx_buf.rb_bc_max_requests;
-
        spin_lock(&xprt->transport_lock);
        xprt->cwnd = credits << RPC_CWNDSHIFT;
        spin_unlock(&xprt->transport_lock);
 
        spin_lock(&xprt->queue_lock);
-       ret = 0;
        xprt_complete_rqst(req->rq_task, rcvbuf->len);
        xprt_unpin_rqst(req);
        rcvbuf->len = 0;
 
 out_unlock:
        spin_unlock(&xprt->queue_lock);
-out:
-       return ret;
-
-out_shortreply:
-       dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
-               xprt, src->iov_len);
-       goto out;
-
-out_notfound:
-       dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
-               xprt, be32_to_cpu(xid));
-       goto out_unlock;
 }
 
 /* Send a backwards direction RPC call.
@@ -192,10 +153,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
        *p++ = xdr_zero;
        *p   = xdr_zero;
 
-#ifdef SVCRDMA_BACKCHANNEL_DEBUG
-       pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
-#endif
-
        rqst->rq_xtime = ktime_get();
        rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
        if (rc)
@@ -206,45 +163,36 @@ put_ctxt:
        svc_rdma_send_ctxt_put(rdma, ctxt);
 
 drop_connection:
-       dprintk("svcrdma: failed to send bc call\n");
        return -ENOTCONN;
 }
 
-/* Send an RPC call on the passive end of a transport
- * connection.
+/**
+ * xprt_rdma_bc_send_request - Send a reverse-direction Call
+ * @rqst: rpc_rqst containing Call message to be sent
+ *
+ * Return values:
+ *   %0 if the message was sent successfully
+ *   %ENOTCONN if the message was not sent
  */
-static int
-xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
+static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
 {
        struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
-       struct svcxprt_rdma *rdma;
+       struct svcxprt_rdma *rdma =
+               container_of(sxprt, struct svcxprt_rdma, sc_xprt);
        int ret;
 
-       dprintk("svcrdma: sending bc call with xid: %08x\n",
-               be32_to_cpu(rqst->rq_xid));
+       if (test_bit(XPT_DEAD, &sxprt->xpt_flags))
+               return -ENOTCONN;
 
-       mutex_lock(&sxprt->xpt_mutex);
-
-       ret = -ENOTCONN;
-       rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
-       if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) {
-               ret = rpcrdma_bc_send_request(rdma, rqst);
-               if (ret == -ENOTCONN)
-                       svc_close_xprt(sxprt);
-       }
-
-       mutex_unlock(&sxprt->xpt_mutex);
-
-       if (ret < 0)
-               return ret;
-       return 0;
+       ret = rpcrdma_bc_send_request(rdma, rqst);
+       if (ret == -ENOTCONN)
+               svc_close_xprt(sxprt);
+       return ret;
 }
 
 static void
 xprt_rdma_bc_close(struct rpc_xprt *xprt)
 {
-       dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
-
        xprt_disconnect_done(xprt);
        xprt->cwnd = RPC_CWNDSHIFT;
 }
@@ -252,8 +200,6 @@ xprt_rdma_bc_close(struct rpc_xprt *xprt)
 static void
 xprt_rdma_bc_put(struct rpc_xprt *xprt)
 {
-       dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
-
        xprt_rdma_free_addresses(xprt);
        xprt_free(xprt);
 }
@@ -288,19 +234,14 @@ xprt_setup_rdma_bc(struct xprt_create *args)
        struct rpc_xprt *xprt;
        struct rpcrdma_xprt *new_xprt;
 
-       if (args->addrlen > sizeof(xprt->addr)) {
-               dprintk("RPC:       %s: address too large\n", __func__);
+       if (args->addrlen > sizeof(xprt->addr))
                return ERR_PTR(-EBADF);
-       }
 
        xprt = xprt_alloc(args->net, sizeof(*new_xprt),
                          RPCRDMA_MAX_BC_REQUESTS,
                          RPCRDMA_MAX_BC_REQUESTS);
-       if (!xprt) {
-               dprintk("RPC:       %s: couldn't allocate rpc_xprt\n",
-                       __func__);
+       if (!xprt)
                return ERR_PTR(-ENOMEM);
-       }
 
        xprt->timeout = &xprt_rdma_bc_timeout;
        xprt_set_bound(xprt);
index efa5fcb..e426fed 100644 (file)
@@ -665,23 +665,23 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
        return hdr_len;
 
 out_short:
-       trace_svcrdma_decode_short(rq_arg->len);
+       trace_svcrdma_decode_short_err(rq_arg->len);
        return -EINVAL;
 
 out_version:
-       trace_svcrdma_decode_badvers(rdma_argp);
+       trace_svcrdma_decode_badvers_err(rdma_argp);
        return -EPROTONOSUPPORT;
 
 out_drop:
-       trace_svcrdma_decode_drop(rdma_argp);
+       trace_svcrdma_decode_drop_err(rdma_argp);
        return 0;
 
 out_proc:
-       trace_svcrdma_decode_badproc(rdma_argp);
+       trace_svcrdma_decode_badproc_err(rdma_argp);
        return -EINVAL;
 
 out_inval:
-       trace_svcrdma_decode_parse(rdma_argp);
+       trace_svcrdma_decode_parse_err(rdma_argp);
        return -EINVAL;
 }
 
@@ -878,12 +878,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                goto out_drop;
        rqstp->rq_xprt_hlen = ret;
 
-       if (svc_rdma_is_backchannel_reply(xprt, p)) {
-               ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
-                                              &rqstp->rq_arg);
-               svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
-               return ret;
-       }
+       if (svc_rdma_is_backchannel_reply(xprt, p))
+               goto out_backchannel;
+
        svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
 
        p += rpcrdma_fixed_maxsz;
@@ -913,6 +910,8 @@ out_postfail:
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return ret;
 
+out_backchannel:
+       svc_rdma_handle_bc_reply(rqstp, ctxt);
 out_drop:
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
index 23c2d3c..5eb3530 100644 (file)
@@ -9,13 +9,10 @@
 
 #include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_rdma.h>
-#include <linux/sunrpc/debug.h>
 
 #include "xprt_rdma.h"
 #include <trace/events/rpcrdma.h>
 
-#define RPCDBG_FACILITY        RPCDBG_SVCXPRT
-
 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
 
@@ -39,7 +36,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
 struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
-       int                     rw_nents;
+       unsigned int            rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[];
 };
@@ -67,19 +64,22 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
-                       goto out;
+                       goto out_noctx;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }
 
        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl,
-                                  SG_CHUNK_SIZE)) {
-               kfree(ctxt);
-               ctxt = NULL;
-       }
-out:
+                                  SG_CHUNK_SIZE))
+               goto out_free;
        return ctxt;
+
+out_free:
+       kfree(ctxt);
+out_noctx:
+       trace_svcrdma_no_rwctx_err(rdma, sges);
+       return NULL;
 }
 
 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
        }
 }
 
+/**
+ * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
+ * @rdma: controlling transport instance
+ * @ctxt: R/W context to prepare
+ * @offset: RDMA offset
+ * @handle: RDMA tag/handle
+ * @direction: I/O direction
+ *
+ * Returns on success, the number of WQEs that will be needed
+ * on the workqueue, or a negative errno.
+ */
+static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
+                               struct svc_rdma_rw_ctxt *ctxt,
+                               u64 offset, u32 handle,
+                               enum dma_data_direction direction)
+{
+       int ret;
+
+       ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
+                              ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+                              0, offset, handle, direction);
+       if (unlikely(ret < 0)) {
+               svc_rdma_put_rw_ctxt(rdma, ctxt);
+               trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
+       }
+       return ret;
+}
+
 /* A chunk context tracks all I/O for moving one Read or Write
  * chunk. This is a a set of rdma_rw's that handle data movement
  * for all segments of one chunk.
@@ -428,15 +456,13 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
-                       goto out_noctx;
+                       return -ENOMEM;
 
                constructor(info, write_len, ctxt);
-               ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
-                                      rdma->sc_port_num, ctxt->rw_sg_table.sgl,
-                                      ctxt->rw_nents, 0, seg_offset,
-                                      seg_handle, DMA_TO_DEVICE);
+               ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
+                                          DMA_TO_DEVICE);
                if (ret < 0)
-                       goto out_initerr;
+                       return -EIO;
 
                trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
 
@@ -455,18 +481,9 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
        return 0;
 
 out_overflow:
-       dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
-               info->wi_nsegs);
+       trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
+                                    info->wi_nsegs);
        return -E2BIG;
-
-out_noctx:
-       dprintk("svcrdma: no R/W ctxs available\n");
-       return -ENOMEM;
-
-out_initerr:
-       svc_rdma_put_rw_ctxt(rdma, ctxt);
-       trace_svcrdma_dma_map_rwctx(rdma, ret);
-       return -EIO;
 }
 
 /* Send one of an xdr_buf's kvecs by itself. To send a Reply
@@ -616,7 +633,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
-               goto out_noctx;
+               return -ENOMEM;
        ctxt->rw_nents = sge_no;
 
        sg = ctxt->rw_sg_table.sgl;
@@ -646,29 +663,18 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                        goto out_overrun;
        }
 
-       ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
-                              cc->cc_rdma->sc_port_num,
-                              ctxt->rw_sg_table.sgl, ctxt->rw_nents,
-                              0, offset, rkey, DMA_FROM_DEVICE);
+       ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
+                                  DMA_FROM_DEVICE);
        if (ret < 0)
-               goto out_initerr;
+               return -EIO;
 
        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;
 
-out_noctx:
-       dprintk("svcrdma: no R/W ctxs available\n");
-       return -ENOMEM;
-
 out_overrun:
-       dprintk("svcrdma: request overruns rq_pages\n");
+       trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
        return -EINVAL;
-
-out_initerr:
-       trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
-       svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
-       return -EIO;
 }
 
 /* Walk the segments in the Read chunk starting at @p and construct
index b6c8643..38e7c3c 100644 (file)
@@ -868,12 +868,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
        __be32 *p;
        int ret;
 
-       /* Create the RDMA response header. xprt->xpt_mutex,
-        * acquired in svc_send(), serializes RPC replies. The
-        * code path below that inserts the credit grant value
-        * into each transport header runs only inside this
-        * critical section.
-        */
+       ret = -ENOTCONN;
+       if (svc_xprt_is_dead(xprt))
+               goto err0;
+
        ret = -ENOMEM;
        sctxt = svc_rdma_send_ctxt_get(rdma);
        if (!sctxt)
index ea54785..d38be57 100644 (file)
@@ -211,7 +211,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
        newxprt->sc_ord = param->initiator_depth;
 
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
-       svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
+       newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa);
+       memcpy(&newxprt->sc_xprt.xpt_remote, sa,
+              newxprt->sc_xprt.xpt_remotelen);
+       snprintf(newxprt->sc_xprt.xpt_remotebuf,
+                sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa);
+
        /* The remote port is arbitrary and not under the control of the
         * client ULP. Set it to a fixed value so that the DRC continues
         * to be effective after a reconnect.
@@ -309,11 +314,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
        struct svcxprt_rdma *cma_xprt;
        int ret;
 
-       dprintk("svcrdma: Creating RDMA listener\n");
-       if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
-               dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
+       if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
                return ERR_PTR(-EAFNOSUPPORT);
-       }
        cma_xprt = svc_rdma_create_xprt(serv, net);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
@@ -324,7 +326,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
-               dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }
 
@@ -333,23 +334,17 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
         */
 #if IS_ENABLED(CONFIG_IPV6)
        ret = rdma_set_afonly(listen_id, 1);
-       if (ret) {
-               dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
+       if (ret)
                goto err1;
-       }
 #endif
        ret = rdma_bind_addr(listen_id, sa);
-       if (ret) {
-               dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
+       if (ret)
                goto err1;
-       }
        cma_xprt->sc_cm_id = listen_id;
 
        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
-       if (ret) {
-               dprintk("svcrdma: rdma_listen failed = %d\n", ret);
+       if (ret)
                goto err1;
-       }
 
        /*
         * We need to use the address from the cm_id in case the
@@ -405,9 +400,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        if (!newxprt)
                return NULL;
 
-       dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
-               newxprt, newxprt->sc_cm_id);
-
        dev = newxprt->sc_cm_id->device;
        newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
 
@@ -443,21 +435,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
        newxprt->sc_pd = ib_alloc_pd(dev, 0);
        if (IS_ERR(newxprt->sc_pd)) {
-               dprintk("svcrdma: error creating PD for connect request\n");
+               trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
                goto errout;
        }
        newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
                                            IB_POLL_WORKQUEUE);
-       if (IS_ERR(newxprt->sc_sq_cq)) {
-               dprintk("svcrdma: error creating SQ CQ for connect request\n");
+       if (IS_ERR(newxprt->sc_sq_cq))
                goto errout;
-       }
        newxprt->sc_rq_cq =
                ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
-       if (IS_ERR(newxprt->sc_rq_cq)) {
-               dprintk("svcrdma: error creating RQ CQ for connect request\n");
+       if (IS_ERR(newxprt->sc_rq_cq))
                goto errout;
-       }
 
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
@@ -481,7 +469,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
-               dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
+               trace_svcrdma_qp_err(newxprt, ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;
@@ -489,8 +477,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                newxprt->sc_snd_w_inv = false;
        if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
-           !rdma_ib_or_roce(dev, newxprt->sc_port_num))
+           !rdma_ib_or_roce(dev, newxprt->sc_port_num)) {
+               trace_svcrdma_fabric_err(newxprt, -EINVAL);
                goto errout;
+       }
 
        if (!svc_rdma_post_recvs(newxprt))
                goto errout;
@@ -512,15 +502,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
                                           dev->attrs.max_qp_init_rd_atom);
        if (!conn_param.initiator_depth) {
-               dprintk("svcrdma: invalid ORD setting\n");
                ret = -EINVAL;
+               trace_svcrdma_initdepth_err(newxprt, ret);
                goto errout;
        }
        conn_param.private_data = &pmsg;
        conn_param.private_data_len = sizeof(pmsg);
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
-       if (ret)
+       if (ret) {
+               trace_svcrdma_accept_err(newxprt, ret);
                goto errout;
+       }
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        dprintk("svcrdma: new connection %p accepted:\n", newxprt);
@@ -535,12 +527,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        dprintk("    ord             : %d\n", conn_param.initiator_depth);
 #endif
 
-       trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
        return &newxprt->sc_xprt;
 
  errout:
-       dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
-       trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
@@ -578,8 +567,6 @@ static void __svc_rdma_free(struct work_struct *work)
                container_of(work, struct svcxprt_rdma, sc_work);
        struct svc_xprt *xprt = &rdma->sc_xprt;
 
-       trace_svcrdma_xprt_free(xprt);
-
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_drain_qp(rdma->sc_qp);
 
index 659da37..0c4af7f 100644 (file)
@@ -68,7 +68,7 @@
  * tunables
  */
 
-unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
+static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
 unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
 unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
 unsigned int xprt_rdma_memreg_strategy         = RPCRDMA_FRWR;
@@ -281,8 +281,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 {
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
-       trace_xprtrdma_op_destroy(r_xprt);
-
        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
        rpcrdma_xprt_disconnect(r_xprt);
@@ -365,10 +363,6 @@ xprt_setup_rdma(struct xprt_create *args)
 
        xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
 
-       dprintk("RPC:       %s: %s:%s\n", __func__,
-               xprt->address_strings[RPC_DISPLAY_ADDR],
-               xprt->address_strings[RPC_DISPLAY_PORT]);
-       trace_xprtrdma_create(new_xprt);
        return xprt;
 }
 
@@ -385,8 +379,6 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
 {
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
-       trace_xprtrdma_op_close(r_xprt);
-
        rpcrdma_xprt_disconnect(r_xprt);
 
        xprt->reestablish_timeout = 0;
index 05c4d3a..2ae3483 100644 (file)
@@ -141,7 +141,6 @@ void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
        if (wc->status != IB_WC_SUCCESS &&
            r_xprt->rx_ep->re_connect_status == 1) {
                r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
-               trace_xprtrdma_flush_dct(r_xprt, wc->status);
                xprt_force_disconnect(xprt);
        }
 }
index 3a143e2..914508e 100644 (file)
@@ -2528,8 +2528,16 @@ static int bc_sendto(struct rpc_rqst *req)
        return sent;
 }
 
-/*
- * The send routine. Borrows from svc_send
+/**
+ * bc_send_request - Send a backchannel Call on a TCP socket
+ * @req: rpc_rqst containing Call message to be sent
+ *
+ * xpt_mutex ensures @rqstp's whole message is written to the socket
+ * without interruption.
+ *
+ * Return values:
+ *   %0 if the message was sent successfully
+ *   %ENOTCONN if the message was not sent
  */
 static int bc_send_request(struct rpc_rqst *req)
 {
index 0e98900..ec10041 100644 (file)
@@ -629,7 +629,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
 static void tls_build_proto(struct sock *sk)
 {
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
-       const struct proto *prot = READ_ONCE(sk->sk_prot);
+       struct proto *prot = READ_ONCE(sk->sk_prot);
 
        /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
        if (ip_ver == TLSV6 &&
index 013ba3a..ce0b99f 100644 (file)
@@ -8,7 +8,7 @@ config HAVE_GCC_PLUGINS
 menuconfig GCC_PLUGINS
        bool "GCC plugins"
        depends on HAVE_GCC_PLUGINS
-       depends on CC_IS_GCC && GCC_VERSION >= 40800
+       depends on CC_IS_GCC
        depends on $(success,$(srctree)/scripts/gcc-plugin.sh $(CC))
        default y
        help
index f68d76d..b4c963f 100755 (executable)
@@ -321,7 +321,7 @@ if (defined($ENV{'KBUILD_VERBOSE'})) {
 
 # Generated docbook code is inserted in a template at a point where
 # docbook v3.1 requires a non-zero sequence of RefEntry's; see:
-# http://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
+# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
 # We keep track of number of generated entries and generate a dummy
 # if needs be to ensure the expanded template can be postprocessed
 # into html.
index 47838f5..9630d25 100644 (file)
@@ -138,6 +138,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
 
+static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_group *group = &substream->self_group;
+
+       if (substream->pcm->nonatomic)
+               mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
+       else
+               spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
+}
+
 /**
  * snd_pcm_stream_unlock_irq - Unlock the PCM stream
  * @substream: PCM substream
@@ -2166,6 +2176,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
        }
        pcm_file = f.file->private_data;
        substream1 = pcm_file->substream;
+
+       if (substream == substream1) {
+               res = -EINVAL;
+               goto _badf;
+       }
+
        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group) {
                res = -ENOMEM;
@@ -2194,7 +2210,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
        snd_pcm_stream_unlock_irq(substream);
 
        snd_pcm_group_lock_irq(target_group, nonatomic);
-       snd_pcm_stream_lock(substream1);
+       snd_pcm_stream_lock_nested(substream1);
        snd_pcm_group_assign(substream1, target_group);
        refcount_inc(&target_group->refs);
        snd_pcm_stream_unlock(substream1);
@@ -2210,7 +2226,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
 
 static void relink_to_local(struct snd_pcm_substream *substream)
 {
-       snd_pcm_stream_lock(substream);
+       snd_pcm_stream_lock_nested(substream);
        snd_pcm_group_assign(substream, &substream->self_group);
        snd_pcm_stream_unlock(substream);
 }
index ddb7c2c..def8161 100644 (file)
@@ -1040,7 +1040,7 @@ static void snd_emu10k1x_proc_reg_write(struct snd_info_entry *entry,
                if (sscanf(line, "%x %x %x", &reg, &channel_id, &val) != 3)
                        continue;
 
-               if (reg < 0x49 && val <= 0xffffffff && channel_id <= 2)
+               if (reg < 0x49 && channel_id <= 2)
                        snd_emu10k1x_ptr_write(emu, reg, channel_id, val);
        }
 }
index 0aa778f..6d73f8b 100644 (file)
@@ -8161,6 +8161,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC225_STANDARD_PINS,
                {0x12, 0xb7a60130},
                {0x17, 0x90170110}),
+       SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC,
+               {0x14, 0x01014010},
+               {0x17, 0x90170120},
+               {0x18, 0x02a11030},
+               {0x19, 0x02a1103f},
+               {0x21, 0x0221101f}),
        {}
 };
 
index b9ce44d..0d63ebf 100644 (file)
@@ -754,6 +754,7 @@ static struct snd_soc_dai_driver max98390_dai[] = {
 static int max98390_dsm_init(struct snd_soc_component *component)
 {
        int ret;
+       int param_size, param_start_addr;
        char filename[128];
        const char *vendor, *product;
        struct max98390_priv *max98390 =
@@ -778,16 +779,31 @@ static int max98390_dsm_init(struct snd_soc_component *component)
        }
 
        dev_dbg(component->dev,
-               "max98390: param fw size %ld\n",
+               "max98390: param fw size %zd\n",
                fw->size);
+       if (fw->size < MAX98390_DSM_PARAM_MIN_SIZE) {
+               dev_err(component->dev,
+                       "param fw is invalid.\n");
+               goto err_alloc;
+       }
        dsm_param = (char *)fw->data;
+       param_start_addr = (dsm_param[0] & 0xff) | (dsm_param[1] & 0xff) << 8;
+       param_size = (dsm_param[2] & 0xff) | (dsm_param[3] & 0xff) << 8;
+       if (param_size > MAX98390_DSM_PARAM_MAX_SIZE ||
+               param_start_addr < DSM_STBASS_HPF_B0_BYTE0 ||
+               fw->size < param_size + MAX98390_DSM_PAYLOAD_OFFSET) {
+               dev_err(component->dev,
+                       "param fw is invalid.\n");
+               goto err_alloc;
+       }
+       regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80);
        dsm_param += MAX98390_DSM_PAYLOAD_OFFSET;
-       regmap_bulk_write(max98390->regmap, DSM_EQ_BQ1_B0_BYTE0,
-               dsm_param,
-               fw->size - MAX98390_DSM_PAYLOAD_OFFSET);
-       release_firmware(fw);
+       regmap_bulk_write(max98390->regmap, param_start_addr,
+               dsm_param, param_size);
        regmap_write(max98390->regmap, MAX98390_R23E1_DSP_GLOBAL_EN, 0x01);
 
+err_alloc:
+       release_firmware(fw);
 err:
        return ret;
 }
index f59cb11..5f444e7 100644 (file)
 
 /* DSM register offset */
 #define MAX98390_DSM_PAYLOAD_OFFSET 16
-#define MAX98390_DSM_PAYLOAD_OFFSET_2 495
+#define MAX98390_DSM_PARAM_MAX_SIZE 770
+#define MAX98390_DSM_PARAM_MIN_SIZE 670
 
 struct max98390_priv {
        struct regmap *regmap;
index 2586d1c..8c9daf3 100644 (file)
@@ -80,8 +80,8 @@ int rl6231_calc_dmic_clk(int rate)
        for (i = 0; i < ARRAY_SIZE(div); i++) {
                if ((div[i] % 3) == 0)
                        continue;
-               /* find divider that gives DMIC frequency below 3.072MHz */
-               if (3072000 * div[i] >= rate)
+               /* find divider that gives DMIC frequency below 1.536MHz */
+               if (1536000 * div[i] >= rate)
                        return i;
        }
 
index 6ba1849..e2e1d5b 100644 (file)
@@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = {
        .inv_jd1_1 = true,
 };
 
+static const struct rt5645_platform_data asus_t101ha_platform_data = {
+       .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+       .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+       .jd_mode = 3,
+};
+
 static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = {
        .jd_mode = 3,
        .in2_diff = true,
@@ -3708,6 +3714,14 @@ static const struct dmi_system_id dmi_platform_data[] = {
                },
                .driver_data = (void *)&asus_t100ha_platform_data,
        },
+       {
+               .ident = "ASUS T101HA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+               },
+               .driver_data = (void *)&asus_t101ha_platform_data,
+       },
        {
                .ident = "MINIX Z83-4",
                .matches = {
index cf4feb8..00be739 100644 (file)
@@ -581,7 +581,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
 
        if (!fsl_asoc_card_is_ac97(priv) && !codec_dev) {
                dev_err(&pdev->dev, "failed to find codec device\n");
-               ret = -EINVAL;
+               ret = -EPROBE_DEFER;
                goto asrc_fail;
        }
 
index 30f70bb..1fdb70b 100644 (file)
@@ -754,6 +754,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_JD_NOT_INV |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Toshiba Encore WT10-A */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"),
+               },
+               .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+                                       BYT_RT5640_JD_SRC_JD1_IN4P |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_SSP0_AIF2 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Catch-all for generic Insyde tablets, must be last */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
index 48eda1a..954ab01 100644 (file)
@@ -407,7 +407,7 @@ static struct snd_soc_dai_link geminilake_dais[] = {
                .name = "Glk Audio Echo Reference cap",
                .stream_name = "Echoreference Capture",
                .init = NULL,
-               .capture_only = 1,
+               .dpcm_capture = 1,
                .nonatomic = 1,
                .dynamic = 1,
                SND_SOC_DAILINK_REG(echoref, dummy, platform),
index cc9b5ea..e29c31f 100644 (file)
@@ -692,7 +692,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
                .name = "Kbl Audio Echo Reference cap",
                .stream_name = "Echoreference Capture",
                .init = NULL,
-               .capture_only = 1,
+               .dpcm_capture = 1,
                .nonatomic = 1,
                SND_SOC_DAILINK_REG(echoref, dummy, platform),
        },
@@ -858,7 +858,7 @@ static struct snd_soc_dai_link kabylake_max98_927_373_dais[] = {
                .name = "Kbl Audio Echo Reference cap",
                .stream_name = "Echoreference Capture",
                .init = NULL,
-               .capture_only = 1,
+               .dpcm_capture = 1,
                .nonatomic = 1,
                SND_SOC_DAILINK_REG(echoref, dummy, platform),
        },
index 658a9da..09ba55f 100644 (file)
@@ -672,7 +672,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
                .name = "Kbl Audio Echo Reference cap",
                .stream_name = "Echoreference Capture",
                .init = NULL,
-               .capture_only = 1,
+               .dpcm_capture = 1,
                .nonatomic = 1,
                SND_SOC_DAILINK_REG(echoref, dummy, platform),
        },
index 1b1f8d7..b34cf6c 100644 (file)
@@ -566,7 +566,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
                .name = "Kbl Audio Echo Reference cap",
                .stream_name = "Echoreference Capture",
                .init = NULL,
-               .capture_only = 1,
+               .dpcm_capture = 1,
                .nonatomic = 1,
                SND_SOC_DAILINK_REG(echoref, dummy, platform),
        },
index 2e9b56b..b2e8671 100644 (file)
@@ -249,7 +249,7 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
        /* Enable pclk to access registers and clock the fifo ip */
        ret = clk_prepare_enable(fifo->pclk);
        if (ret)
-               return ret;
+               goto free_irq;
 
        /* Setup status2 so it reports the memory pointer */
        regmap_update_bits(fifo->map, FIFO_CTRL1,
@@ -269,8 +269,14 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
        /* Take memory arbitror out of reset */
        ret = reset_control_deassert(fifo->arb);
        if (ret)
-               clk_disable_unprepare(fifo->pclk);
+               goto free_clk;
+
+       return 0;
 
+free_clk:
+       clk_disable_unprepare(fifo->pclk);
+free_irq:
+       free_irq(fifo->irq, ss);
        return ret;
 }
 EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);
index 2ca8c98..5a4a91c 100644 (file)
@@ -49,19 +49,26 @@ int meson_card_reallocate_links(struct snd_soc_card *card,
        links = krealloc(priv->card.dai_link,
                         num_links * sizeof(*priv->card.dai_link),
                         GFP_KERNEL | __GFP_ZERO);
+       if (!links)
+               goto err_links;
+
        ldata = krealloc(priv->link_data,
                         num_links * sizeof(*priv->link_data),
                         GFP_KERNEL | __GFP_ZERO);
-
-       if (!links || !ldata) {
-               dev_err(priv->card.dev, "failed to allocate links\n");
-               return -ENOMEM;
-       }
+       if (!ldata)
+               goto err_ldata;
 
        priv->card.dai_link = links;
        priv->link_data = ldata;
        priv->card.num_links = num_links;
        return 0;
+
+err_ldata:
+       kfree(links);
+err_links:
+       dev_err(priv->card.dev, "failed to allocate links\n");
+       return -ENOMEM;
+
 }
 EXPORT_SYMBOL_GPL(meson_card_reallocate_links);
 
index b07eca2..7b38720 100644 (file)
@@ -1648,9 +1648,25 @@ match:
                        dai_link->platforms->name = component->name;
 
                        /* convert non BE into BE */
-                       dai_link->no_pcm = 1;
-                       dai_link->dpcm_playback = 1;
-                       dai_link->dpcm_capture = 1;
+                       if (!dai_link->no_pcm) {
+                               dai_link->no_pcm = 1;
+
+                               if (dai_link->dpcm_playback)
+                                       dev_warn(card->dev,
+                                                "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n",
+                                                dai_link->name);
+                               if (dai_link->dpcm_capture)
+                                       dev_warn(card->dev,
+                                                "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n",
+                                                dai_link->name);
+
+                               /* convert normal link into DPCM one */
+                               if (!(dai_link->dpcm_playback ||
+                                     dai_link->dpcm_capture)) {
+                                       dai_link->dpcm_playback = !dai_link->capture_only;
+                                       dai_link->dpcm_capture = !dai_link->playback_only;
+                               }
+                       }
 
                        /*
                         * override any BE fixups
index 276505f..2c114b4 100644 (file)
@@ -2789,20 +2789,44 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
        struct snd_pcm *pcm;
        char new_name[64];
        int ret = 0, playback = 0, capture = 0;
+       int stream;
        int i;
 
+       if (rtd->dai_link->dynamic && rtd->num_cpus > 1) {
+               dev_err(rtd->dev,
+                       "DPCM doesn't support Multi CPU for Front-Ends yet\n");
+               return -EINVAL;
+       }
+
        if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
-               cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-               if (rtd->num_cpus > 1) {
-                       dev_err(rtd->dev,
-                               "DPCM doesn't support Multi CPU yet\n");
-                       return -EINVAL;
+               if (rtd->dai_link->dpcm_playback) {
+                       stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+                               if (!snd_soc_dai_stream_valid(cpu_dai,
+                                                             stream)) {
+                                       dev_err(rtd->card->dev,
+                                               "CPU DAI %s for rtd %s does not support playback\n",
+                                               cpu_dai->name,
+                                               rtd->dai_link->stream_name);
+                                       return -EINVAL;
+                               }
+                       playback = 1;
+               }
+               if (rtd->dai_link->dpcm_capture) {
+                       stream = SNDRV_PCM_STREAM_CAPTURE;
+
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+                               if (!snd_soc_dai_stream_valid(cpu_dai,
+                                                             stream)) {
+                                       dev_err(rtd->card->dev,
+                                               "CPU DAI %s for rtd %s does not support capture\n",
+                                               cpu_dai->name,
+                                               rtd->dai_link->stream_name);
+                                       return -EINVAL;
+                               }
+                       capture = 1;
                }
-
-               playback = rtd->dai_link->dpcm_playback &&
-                          snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK);
-               capture = rtd->dai_link->dpcm_capture &&
-                         snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE);
        } else {
                /* Adapt stream for codec2codec links */
                int cpu_capture = rtd->dai_link->params ?
index ce053ba..d03b5be 100644 (file)
@@ -52,8 +52,10 @@ static int sof_nocodec_bes_setup(struct device *dev,
                links[i].platforms->name = dev_name(dev);
                links[i].codecs->dai_name = "snd-soc-dummy-dai";
                links[i].codecs->name = "snd-soc-dummy";
-               links[i].dpcm_playback = 1;
-               links[i].dpcm_capture = 1;
+               if (ops->drv[i].playback.channels_min)
+                       links[i].dpcm_playback = 1;
+               if (ops->drv[i].capture.channels_min)
+                       links[i].dpcm_capture = 1;
        }
 
        card->dai_link = links;
index fd6fd17..162bdd6 100644 (file)
@@ -634,7 +634,6 @@ static int usb_audio_probe(struct usb_interface *intf,
                                                                   id, &chip);
                                        if (err < 0)
                                                goto __error;
-                                       chip->pm_intf = intf;
                                        break;
                                } else if (vid[i] != -1 || pid[i] != -1) {
                                        dev_info(&dev->dev,
@@ -651,6 +650,13 @@ static int usb_audio_probe(struct usb_interface *intf,
                        goto __error;
                }
        }
+
+       if (chip->num_interfaces >= MAX_CARD_INTERFACES) {
+               dev_info(&dev->dev, "Too many interfaces assigned to the single USB-audio card\n");
+               err = -EINVAL;
+               goto __error;
+       }
+
        dev_set_drvdata(&dev->dev, chip);
 
        /*
@@ -703,6 +709,7 @@ static int usb_audio_probe(struct usb_interface *intf,
        }
 
        usb_chip[chip->index] = chip;
+       chip->intf[chip->num_interfaces] = intf;
        chip->num_interfaces++;
        usb_set_intfdata(intf, chip);
        atomic_dec(&chip->active);
@@ -818,19 +825,37 @@ void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
 
 int snd_usb_autoresume(struct snd_usb_audio *chip)
 {
+       int i, err;
+
        if (atomic_read(&chip->shutdown))
                return -EIO;
-       if (atomic_inc_return(&chip->active) == 1)
-               return usb_autopm_get_interface(chip->pm_intf);
+       if (atomic_inc_return(&chip->active) != 1)
+               return 0;
+
+       for (i = 0; i < chip->num_interfaces; i++) {
+               err = usb_autopm_get_interface(chip->intf[i]);
+               if (err < 0) {
+                       /* rollback */
+                       while (--i >= 0)
+                               usb_autopm_put_interface(chip->intf[i]);
+                       atomic_dec(&chip->active);
+                       return err;
+               }
+       }
        return 0;
 }
 
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
+       int i;
+
        if (atomic_read(&chip->shutdown))
                return;
-       if (atomic_dec_and_test(&chip->active))
-               usb_autopm_put_interface(chip->pm_intf);
+       if (!atomic_dec_and_test(&chip->active))
+               return;
+
+       for (i = 0; i < chip->num_interfaces; i++)
+               usb_autopm_put_interface(chip->intf[i]);
 }
 
 static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
@@ -843,9 +868,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
        if (chip == (void *)-1L)
                return 0;
 
-       chip->autosuspended = !!PMSG_IS_AUTO(message);
-       if (!chip->autosuspended)
-               snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
        if (!chip->num_suspended_intf++) {
                list_for_each_entry(as, &chip->pcm_list, list) {
                        snd_usb_pcm_suspend(as);
@@ -858,6 +880,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
                        snd_usb_mixer_suspend(mixer);
        }
 
+       if (!PMSG_IS_AUTO(message) && !chip->system_suspend) {
+               snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+               chip->system_suspend = chip->num_suspended_intf;
+       }
+
        return 0;
 }
 
@@ -871,10 +898,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
 
        if (chip == (void *)-1L)
                return 0;
-       if (--chip->num_suspended_intf)
-               return 0;
 
        atomic_inc(&chip->active); /* avoid autopm */
+       if (chip->num_suspended_intf > 1)
+               goto out;
 
        list_for_each_entry(as, &chip->pcm_list, list) {
                err = snd_usb_pcm_resume(as);
@@ -896,9 +923,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
                snd_usbmidi_resume(p);
        }
 
-       if (!chip->autosuspended)
+ out:
+       if (chip->num_suspended_intf == chip->system_suspend) {
                snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
-       chip->autosuspended = 0;
+               chip->system_suspend = 0;
+       }
+       chip->num_suspended_intf--;
 
 err_out:
        atomic_dec(&chip->active); /* allow autopm after this point */
index 6d64921..4ec4910 100644 (file)
                .ifnum = QUIRK_NO_INTERFACE                             \
        }
 
+/* HP Thunderbolt Dock Audio Headset */
+{
+       USB_DEVICE(0x03f0, 0x0269),
+       QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Headset",
+                            "HP-Thunderbolt-Dock-Audio-Headset"),
+},
+/* HP Thunderbolt Dock Audio Module */
+{
+       USB_DEVICE(0x03f0, 0x0567),
+       QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Module",
+                            "HP-Thunderbolt-Dock-Audio-Module"),
+},
 /* FTDI devices */
 {
        USB_DEVICE(0x0403, 0xb8d8),
index 1c892c7..b91c4c0 100644 (file)
 struct media_device;
 struct media_intf_devnode;
 
+#define MAX_CARD_INTERFACES    16
+
 struct snd_usb_audio {
        int index;
        struct usb_device *dev;
        struct snd_card *card;
-       struct usb_interface *pm_intf;
+       struct usb_interface *intf[MAX_CARD_INTERFACES];
        u32 usb_id;
        struct mutex mutex;
-       unsigned int autosuspended:1;   
+       unsigned int system_suspend;
        atomic_t active;
        atomic_t shutdown;
        atomic_t usage_count;
index 5aaddc7..dd38c2b 100644 (file)
@@ -35,7 +35,7 @@ static acpi_status osl_add_table_to_list(char *signature, u32 instance);
 static acpi_status
 osl_read_table_from_file(char *filename,
                         acpi_size file_offset,
-                        char *signature, struct acpi_table_header **table);
+                        struct acpi_table_header **table);
 
 static acpi_status
 osl_map_table(acpi_size address,
@@ -1184,8 +1184,6 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
  *
  * PARAMETERS:  filename            - File that contains the desired table
  *              file_offset         - Offset of the table in file
- *              signature           - Optional ACPI Signature for desired table.
- *                                    A null terminated 4-character string.
  *              table               - Where a pointer to the table is returned
  *
  * RETURN:      Status; Table buffer is returned if AE_OK.
@@ -1197,7 +1195,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
 static acpi_status
 osl_read_table_from_file(char *filename,
                         acpi_size file_offset,
-                        char *signature, struct acpi_table_header **table)
+                        struct acpi_table_header **table)
 {
        FILE *table_file;
        struct acpi_table_header header;
@@ -1225,6 +1223,8 @@ osl_read_table_from_file(char *filename,
                goto exit;
        }
 
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+
        /* If signature is specified, it must match the table */
 
        if (signature) {
@@ -1244,6 +1244,7 @@ osl_read_table_from_file(char *filename,
                        goto exit;
                }
        }
+#endif
 
        table_length = ap_get_table_length(&header);
        if (table_length == 0) {
@@ -1366,7 +1367,7 @@ osl_get_customized_table(char *pathname,
        /* There is no physical address saved for customized tables, use zero */
 
        *address = 0;
-       status = osl_read_table_from_file(table_filename, 0, NULL, table);
+       status = osl_read_table_from_file(table_filename, 0, table);
 
        return (status);
 }