Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Jun 2018 22:13:42 +0000 (07:13 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Jun 2018 22:13:42 +0000 (07:13 +0900)
Pull networking fixes from David Miller:

 1) Fix crash on bpf_prog_load() errors, from Daniel Borkmann.

 2) Fix ATM VCC memory accounting, from David Woodhouse.

 3) fib6_info objects need RCU freeing, from Eric Dumazet (the
    deferred-free pattern is sketched after this list).

 4) Fix SO_BINDTODEVICE handling for TCP sockets, from David Ahern (the
    userspace side of the option is sketched after this list).

 5) Fix clobbered error code in enic_open() failure path, from
    Govindarajulu Varadarajan (the error-path pattern is sketched after
    this list).

 6) Propagate dev_get_valid_name() error returns properly, from Li
    RongQing (covered by the same error-path sketch below).

 7) Fix suspend/resume in davinci_emac driver, from Bartosz Golaszewski.

 8) Various act_ife fixes (recursive locking, IDR leaks, etc.) from
    Davide Caratti.

 9) Fix buggy checksum handling in sungem driver, from Eric Dumazet (the
    checksum-reporting pattern is sketched after this list).

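For item 3, a minimal sketch of the RCU deferred-free pattern such a fix
relies on; the struct and function names here are illustrative, not the
actual net/ipv6 code:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            /* fields traversed by readers under rcu_read_lock() */
            struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
            struct foo *f = container_of(head, struct foo, rcu);

            kfree(f);
    }

    /* On teardown, instead of calling kfree(f) directly, defer the
     * free for a grace period so concurrent readers never see freed
     * memory: */
    call_rcu(&f->rcu, foo_free_rcu);
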
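For item 4, the userspace side of SO_BINDTODEVICE, for reference; the
interface name is whatever the caller passes, and error handling is left
to the caller:

    #include <string.h>
    #include <sys/socket.h>

    /* Restrict an existing socket to traffic on one interface,
     * e.g. bind_to_dev(fd, "eth0").  Returns 0, or -1 with errno set. */
    static int bind_to_dev(int fd, const char *ifname)
    {
            return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                              ifname, strlen(ifname) + 1);
    }
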
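Items 5 and 6 are the same bug class seen twice: an error code that
should reach the caller gets clobbered or replaced on the way out. A
hedged sketch with made-up helper names:

    int example_open(struct net_device *netdev)
    {
            int err;

            err = example_alloc_rings(netdev);  /* may return -ENOMEM */
            if (err)
                    goto err_out;

            return 0;

    err_out:
            /* Don't write "err = example_cleanup(netdev);" here --
             * that would clobber the original error.  Likewise, when
             * a helper hands back an errno, return that errno rather
             * than a hard-coded substitute. */
            example_cleanup(netdev);
            return err;
    }
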
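For item 9, the generic way a driver reports a checksum its hardware
computed over the whole payload; hw_sum stands in for whatever the NIC's
RX descriptor actually provides:

    /* Tell the stack the hardware already summed the payload. */
    skb->csum = csum_unfold(hw_sum);        /* hw_sum is a __sum16 */
    skb->ip_summed = CHECKSUM_COMPLETE;
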
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  ip: limit use of gso_size to udp
  stmmac: fix DMA channel hang in half-duplex mode
  net: stmmac: socfpga: add additional ocp reset line for Stratix10
  net: sungem: fix rx checksum support
  bpfilter: ignore binary files
  bpfilter: fix build error
  net/usb/drivers: Remove useless hrtimer_active check
  net/sched: act_ife: preserve the action control in case of error
  net/sched: act_ife: fix recursive lock and idr leak
  net: ethernet: fix suspend/resume in davinci_emac
  net: propagate dev_get_valid_name return code
  enic: do not overwrite error code
  net/tcp: Fix socket lookups with SO_BINDTODEVICE
  ptp: replace getnstimeofday64() with ktime_get_real_ts64()
  net/ipv6: respect rcu grace period before freeing fib6_info
  net: net_failover: fix typo in net_failover_slave_register()
  ipvlan: use ETH_MAX_MTU as max mtu
  net: hamradio: use eth_broadcast_addr
  enic: initialize enic->rfs_h.lock in enic_probe
  MAINTAINERS: Add Sam as the maintainer for NCSI
  ...

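One commit above, "ptp: replace getnstimeofday64() with
ktime_get_real_ts64()", is a mechanical API migration: both calls fill a
struct timespec64 with wall-clock time, the latter being the current
name for it:

    struct timespec64 ts;

    /* Preferred spelling of the deprecated getnstimeofday64(). */
    ktime_get_real_ts64(&ts);
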
355 files changed:
Documentation/ABI/obsolete/sysfs-gpio
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
Documentation/block/biodoc.txt
Documentation/core-api/kernel-api.rst
Documentation/crypto/crypto_engine.rst
Documentation/devicetree/bindings/clock/st/st,clkgen.txt
Documentation/devicetree/bindings/clock/ti/gate.txt
Documentation/devicetree/bindings/clock/ti/interface.txt
Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
Documentation/devicetree/bindings/display/bridge/tda998x.txt
Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt
Documentation/devicetree/bindings/input/rotary-encoder.txt
Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
Documentation/devicetree/bindings/mfd/as3722.txt
Documentation/devicetree/bindings/mfd/mt6397.txt
Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
Documentation/devicetree/bindings/mmc/sdhci-st.txt
Documentation/devicetree/bindings/net/dsa/ksz.txt
Documentation/devicetree/bindings/net/dsa/mt7530.txt
Documentation/devicetree/bindings/nvmem/zii,rave-sp-eeprom.txt
Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
Documentation/devicetree/bindings/pci/kirin-pcie.txt
Documentation/devicetree/bindings/pci/pci-keystone.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-max77620.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt
Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt
Documentation/devicetree/bindings/power/supply/ab8500/charger.txt
Documentation/devicetree/bindings/power/wakeup-source.txt
Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
Documentation/devicetree/bindings/sound/st,stm32-i2s.txt
Documentation/devicetree/bindings/sound/st,stm32-sai.txt
Documentation/devicetree/bindings/spi/spi-st-ssc.txt
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
Documentation/driver-api/gpio/consumer.rst
Documentation/driver-api/infrastructure.rst
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/CHANGES
Documentation/filesystems/cifs/TODO
Documentation/hwmon/ina2xx
Documentation/kprobes.txt
Documentation/maintainer/pull-requests.rst
Documentation/networking/can.rst
Documentation/sphinx/rstFlatTable.py
Documentation/trace/coresight.txt
Documentation/trace/events.rst
Documentation/trace/ftrace-uses.rst
Documentation/trace/histogram.txt
Documentation/trace/intel_th.rst
Documentation/trace/tracepoint-analysis.rst
Documentation/translations/ja_JP/howto.rst
Documentation/translations/ko_KR/howto.rst
Documentation/translations/zh_CN/SubmittingDrivers
Documentation/translations/zh_CN/gpio.txt
Documentation/translations/zh_CN/io_ordering.txt
Documentation/translations/zh_CN/magic-number.txt
Documentation/translations/zh_CN/video4linux/omap3isp.txt
Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/Kconfig
arch/arm/include/asm/cacheflush.h
arch/arm64/include/asm/cacheflush.h
arch/microblaze/include/asm/cacheflush.h
arch/parisc/Kconfig
arch/s390/include/asm/css_chars.h
arch/sh/Kconfig
arch/sparc/Kconfig
arch/um/Kconfig.um
arch/unicore32/include/asm/cacheflush.h
arch/x86/entry/vsyscall/vsyscall_64.c
arch/xtensa/include/asm/cacheflush.h
block/Kconfig
block/blk-mq-tag.c
block/blk-mq.c
block/blk-tag.c
block/bsg.c
certs/Kconfig
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/signature.c
drivers/base/Makefile
drivers/base/dma-coherent.c [deleted file]
drivers/base/dma-contiguous.c [deleted file]
drivers/base/dma-mapping.c [deleted file]
drivers/char/Kconfig
drivers/clk/clk.c
drivers/clk/ingenic/cgu.h
drivers/dma/dmaengine.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/gpu/drm/shmobile/Kconfig
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.h
drivers/gpu/drm/shmobile/shmob_drm_drv.h
drivers/gpu/drm/shmobile/shmob_drm_kms.c
drivers/gpu/drm/shmobile/shmob_drm_kms.h
drivers/gpu/drm/shmobile/shmob_drm_plane.c
drivers/gpu/vga/Kconfig
drivers/gpu/vga/vgaarb.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-steam.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/usbhid/Kconfig
drivers/hid/wacom_sys.c
drivers/input/Kconfig
drivers/input/joystick/Kconfig
drivers/input/joystick/iforce/Kconfig
drivers/input/joystick/walkera0701.c
drivers/input/misc/Kconfig
drivers/input/misc/rotary_encoder.c
drivers/input/mouse/Kconfig
drivers/input/mouse/alps.c
drivers/input/serio/Kconfig
drivers/input/touchscreen/wm97xx-core.c
drivers/lightnvm/pblk-rb.c
drivers/md/bcache/Kconfig
drivers/md/bcache/btree.c
drivers/md/bcache/extents.c
drivers/media/dvb-core/dvb_ringbuffer.c
drivers/media/dvb-frontends/Kconfig
drivers/media/dvb-frontends/dib3000.h
drivers/media/dvb-frontends/dib3000mb.c
drivers/media/dvb-frontends/eds1547.h
drivers/media/dvb-frontends/nxt200x.c
drivers/media/dvb-frontends/or51211.c
drivers/media/dvb-frontends/sp8870.c
drivers/media/dvb-frontends/sp887x.c
drivers/media/dvb-frontends/tda1004x.c
drivers/media/dvb-frontends/tda10071.c
drivers/media/dvb-frontends/z0194a.h
drivers/media/i2c/max2175.c
drivers/media/pci/bt8xx/Kconfig
drivers/media/pci/cx18/cx18-dvb.c
drivers/media/pci/cx18/cx18-streams.c
drivers/media/pci/cx23885/cx23885-cards.c
drivers/media/pci/meye/Kconfig
drivers/media/pci/ttpci/Kconfig
drivers/media/platform/pxa_camera.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/platform/via-camera.c
drivers/media/radio/Kconfig
drivers/media/radio/si470x/Kconfig
drivers/media/radio/wl128x/Kconfig
drivers/media/usb/dvb-usb-v2/Kconfig
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
drivers/media/usb/dvb-usb-v2/gl861.c
drivers/media/usb/dvb-usb-v2/lmedm04.c
drivers/media/usb/dvb-usb-v2/lmedm04.h
drivers/media/usb/dvb-usb-v2/mxl111sf.c
drivers/media/usb/dvb-usb-v2/mxl111sf.h
drivers/media/usb/dvb-usb/Kconfig
drivers/media/usb/dvb-usb/a800.c
drivers/media/usb/dvb-usb/af9005-fe.c
drivers/media/usb/dvb-usb/af9005-remote.c
drivers/media/usb/dvb-usb/af9005.c
drivers/media/usb/dvb-usb/af9005.h
drivers/media/usb/dvb-usb/az6027.c
drivers/media/usb/dvb-usb/cxusb.c
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/dvb-usb/dibusb-mb.c
drivers/media/usb/dvb-usb/dibusb-mc-common.c
drivers/media/usb/dvb-usb/dibusb-mc.c
drivers/media/usb/dvb-usb/dibusb.h
drivers/media/usb/dvb-usb/digitv.c
drivers/media/usb/dvb-usb/dtt200u-fe.c
drivers/media/usb/dvb-usb/dtt200u.c
drivers/media/usb/dvb-usb/dtt200u.h
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/media/usb/dvb-usb/dvb-usb-init.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/dvb-usb/friio-fe.c
drivers/media/usb/dvb-usb/friio.c
drivers/media/usb/dvb-usb/friio.h
drivers/media/usb/dvb-usb/gp8psk.c
drivers/media/usb/dvb-usb/gp8psk.h
drivers/media/usb/dvb-usb/m920x.c
drivers/media/usb/dvb-usb/nova-t-usb2.c
drivers/media/usb/dvb-usb/opera1.c
drivers/media/usb/dvb-usb/ttusb2.c
drivers/media/usb/dvb-usb/ttusb2.h
drivers/media/usb/dvb-usb/umt-010.c
drivers/media/usb/dvb-usb/vp702x-fe.c
drivers/media/usb/dvb-usb/vp702x.c
drivers/media/usb/dvb-usb/vp7045-fe.c
drivers/media/usb/dvb-usb/vp7045.c
drivers/media/usb/dvb-usb/vp7045.h
drivers/media/usb/gspca/m5602/Kconfig
drivers/media/usb/ttusb-dec/Kconfig
drivers/media/usb/zr364xx/Kconfig
drivers/net/ethernet/intel/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/loop.c
drivers/parport/Kconfig
drivers/platform/x86/Kconfig
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eer.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/Makefile
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_trace.h [new file with mode: 0644]
drivers/sbus/char/oradax.c
drivers/soundwire/stream.c
drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
drivers/staging/media/bcm2048/TODO
drivers/staging/media/zoran/Kconfig
drivers/video/fbdev/Kconfig
drivers/video/fbdev/Makefile
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/radeon_pm.c
drivers/video/fbdev/au1100fb.c
drivers/video/fbdev/au1200fb.c
drivers/video/fbdev/auo_k1900fb.c [deleted file]
drivers/video/fbdev/auo_k1901fb.c [deleted file]
drivers/video/fbdev/auo_k190x.c [deleted file]
drivers/video/fbdev/auo_k190x.h [deleted file]
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/mmp/fb/mmpfb.c
drivers/video/fbdev/mmp/hw/mmp_ctrl.c
drivers/video/fbdev/nvidia/nvidia.c
drivers/video/fbdev/omap/lcd_ams_delta.c
drivers/video/fbdev/omap/lcd_h3.c
drivers/video/fbdev/omap/lcd_htcherald.c
drivers/video/fbdev/omap/lcd_inn1510.c
drivers/video/fbdev/omap/lcd_inn1610.c
drivers/video/fbdev/omap/lcd_osk.c
drivers/video/fbdev/omap/lcd_palmte.c
drivers/video/fbdev/omap/lcd_palmtt.c
drivers/video/fbdev/omap/lcd_palmz71.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/Kconfig
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/savage/savagefb_driver.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/sh_mobile_lcdcfb.h
drivers/video/fbdev/sh_mobile_meram.c [deleted file]
drivers/video/fbdev/skeletonfb.c
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/via/global.h
drivers/video/fbdev/via/hw.c
drivers/video/fbdev/via/via-core.c
drivers/video/fbdev/via/via_clock.c
drivers/video/fbdev/via/viafbdev.c
fs/Kconfig.binfmt
fs/befs/ChangeLog
fs/binfmt_misc.c
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbdirect.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/jfs/xattr.c
fs/notify/dnotify/dnotify.c
fs/notify/fanotify/fanotify.c
fs/notify/fdinfo.c
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/group.c
fs/notify/inotify/inotify.h
fs/notify/inotify/inotify_fsnotify.c
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
fs/orangefs/orangefs-sysfs.c
fs/proc/base.c
include/keys/asymmetric-subtype.h
include/keys/asymmetric-type.h
include/linux/assoc_array.h
include/linux/assoc_array_priv.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/circ_buf.h
include/linux/dma-contiguous.h
include/linux/fsnotify_backend.h
include/linux/ftrace.h
include/linux/mod_devicetable.h
include/linux/platform_data/sc18is602.h
include/linux/platform_data/shmob_drm.h
include/linux/rculist_nulls.h
include/linux/tracepoint.h
include/uapi/linux/prctl.h
include/video/auo_k190xfb.h [deleted file]
include/video/sh_mobile_lcdc.h
include/video/sh_mobile_meram.h [deleted file]
include/xen/interface/io/kbdif.h
init/Kconfig
kernel/Makefile
kernel/audit_fsnotify.c
kernel/audit_tree.c
kernel/audit_watch.c
kernel/cgroup/cpuset.c
kernel/dma/Kconfig [new file with mode: 0644]
kernel/dma/Makefile [new file with mode: 0644]
kernel/dma/coherent.c [new file with mode: 0644]
kernel/dma/contiguous.c [new file with mode: 0644]
kernel/dma/debug.c [new file with mode: 0644]
kernel/dma/direct.c [new file with mode: 0644]
kernel/dma/mapping.c [new file with mode: 0644]
kernel/dma/noncoherent.c [new file with mode: 0644]
kernel/dma/swiotlb.c [new file with mode: 0644]
kernel/dma/virt.c [new file with mode: 0644]
kernel/power/main.c
kernel/trace/Kconfig
lib/Kconfig
lib/Makefile
lib/dma-debug.c [deleted file]
lib/dma-direct.c [deleted file]
lib/dma-noncoherent.c [deleted file]
lib/dma-virt.c [deleted file]
lib/swiotlb.c [deleted file]
mm/memblock.c
scripts/documentation-file-ref-check
security/device_cgroup.c
security/selinux/hooks.c
sound/core/Kconfig
sound/drivers/Kconfig
sound/pci/Kconfig
tools/include/uapi/linux/prctl.h
tools/lib/api/fs/fs.c
tools/perf/util/bpf-prologue.c
tools/power/pm-graph/config/custom-timeline-functions.cfg

index 32513dc..40d41ea 100644 (file)
@@ -11,7 +11,7 @@ Description:
   Kernel code may export it for complete or partial access.
 
   GPIOs are identified as they are inside the kernel, using integers in
-  the range 0..INT_MAX.  See Documentation/gpio/gpio.txt for more information.
+  the range 0..INT_MAX.  See Documentation/gpio for more information.
 
     /sys/class/gpio
        /export ... asks the kernel to export a GPIO to userspace
index bd4975e..9c5e773 100644 (file)
@@ -238,9 +238,6 @@ Description:        Discover and change clock speed of CPUs
 
                See files in Documentation/cpu-freq/ for more information.
 
-               In particular, read Documentation/cpu-freq/user-guide.txt
-               to learn how to control the knobs.
-
 
 What:          /sys/devices/system/cpu/cpu#/cpufreq/freqdomain_cpus
 Date:          June 2013
index 638342d..efc7aa7 100644 (file)
                                (may crash computer or cause data corruption)
 
        ALSA            [HW,ALSA]
-                       See Documentation/sound/alsa/alsa-parameters.txt
+                       See Documentation/sound/alsa-configuration.rst
 
        alignment=      [KNL,ARM]
                        Allow the default userspace alignment fault handler
                        This will also cause panics on machine check exceptions.
                        Useful together with panic=30 to trigger a reboot.
 
-       OSS             [HW,OSS]
-                       See Documentation/sound/oss/oss-parameters.txt
-
        page_owner=     [KNL] Boot-time page_owner enabling option.
                        Storage of the information about who allocated
                        each page is disabled in default. With this switch,
                        [FTRACE] Set and start specified trace events in order
                        to facilitate early boot debugging. The event-list is a
                        comma separated list of trace events to enable. See
-                       also Documentation/trace/events.txt
+                       also Documentation/trace/events.rst
 
        trace_options=[option-list]
                        [FTRACE] Enable or disable tracer options at boot.
 
                              trace_options=stacktrace
 
-                       See also Documentation/trace/ftrace.txt "trace options"
+                       See also Documentation/trace/ftrace.rst "trace options"
                        section.
 
        tp_printk[FTRACE]
index 8692702..207eca5 100644 (file)
@@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag
 operations before calling end_that_request_last()! For an example of a user
 of these helpers, see the IDE tagged command queueing support.
 
-Certain hardware conditions may dictate a need to invalidate the block tag
-queue. For instance, on IDE any tagged request error needs to clear both
-the hardware and software block queue and enable the driver to sanely restart
-all the outstanding requests. There's a third helper to do that:
-
-       blk_queue_invalidate_tags(struct request_queue *q)
-
-       Clear the internal block tag queue and re-add all the pending requests
-       to the request queue. The driver will receive them again on the
-       next request_fn run, just like it did the first time it encountered
-       them.
-
 3.2.5.2 Tag info
 
 Some block functions exist to query current tag status or to go from a
@@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure:
 Most of the above is simple and straight forward, however busy_list may need
 a bit of explaining. Normally we don't care too much about request ordering,
 but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue. This may happen
-if the driver needs to use blk_queue_invalidate_tags().
+that requests are restarted in the order they were queue.
 
 3.3 I/O Submission
 
index 8e44aea..76fe2d0 100644 (file)
@@ -284,7 +284,7 @@ Resources Management
 MTRR Handling
 -------------
 
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
    :export:
 
 Security Framework
index 8272ac9..1d56221 100644 (file)
@@ -8,11 +8,13 @@ The crypto engine API (CE), is a crypto queue manager.
 
 Requirement
 -----------
-You have to put at start of your tfm_ctx the struct crypto_engine_ctx
-struct your_tfm_ctx {
+You have to put at start of your tfm_ctx the struct crypto_engine_ctx::
+
+  struct your_tfm_ctx {
         struct crypto_engine_ctx enginectx;
         ...
-};
+  };
+
 Why: Since CE manage only crypto_async_request, it cannot know the underlying
 request_type and so have access only on the TFM.
 So using container_of for accessing __ctx is impossible.
index 7364953..45ac19b 100644 (file)
@@ -31,10 +31,10 @@ This binding uses the common clock binding[1].
 Each subnode should use the binding described in [2]..[7]
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[3] Documentation/devicetree/bindings/clock/st,clkgen-mux.txt
-[4] Documentation/devicetree/bindings/clock/st,clkgen-pll.txt
-[7] Documentation/devicetree/bindings/clock/st,quadfs.txt
-[8] Documentation/devicetree/bindings/clock/st,flexgen.txt
+[3] Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
+[4] Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+[7] Documentation/devicetree/bindings/clock/st/st,quadfs.txt
+[8] Documentation/devicetree/bindings/clock/st/st,flexgen.txt
 
 
 Required properties:
index 03f8fde..56d603c 100644 (file)
@@ -10,7 +10,7 @@ will be controlled instead and the corresponding hw-ops for
 that is used.
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/gate-clock.txt
+[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt
 [3] Documentation/devicetree/bindings/clock/ti/clockdomain.txt
 
 Required properties:
index 3111a40..3f47040 100644 (file)
@@ -9,7 +9,7 @@ companion clock finding (match corresponding functional gate
 clock) and hardware autoidle enable / disable.
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/gate-clock.txt
+[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt
 
 Required properties:
 - compatible : shall be one of:
index d36f07e..0551c78 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
        "intermediate"  - A parent of "cpu" clock which is used as "intermediate" clock
                          source (usually MAINPLL) when the original CPU PLL is under
                          transition and not stable yet.
-       Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
+       Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for
        generic clock consumer properties.
 - operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt
        for detail.
index d6d2833..fc2bcbe 100644 (file)
@@ -12,7 +12,7 @@ Required properties:
 - clocks:               Phandles for clock specified in "clock-names" property
 - clock-names :                 The name of clock used by the DFI, must be
                         "pclk_ddr_mon";
-- operating-points-v2:  Refer to Documentation/devicetree/bindings/power/opp.txt
+- operating-points-v2:  Refer to Documentation/devicetree/bindings/opp/opp.txt
                         for details.
 - center-supply:        DMC supply node.
 - status:               Marks the node enabled/disabled.
index 1a4eaca..f5a02f6 100644 (file)
@@ -30,7 +30,7 @@ Optional properties:
   - nxp,calib-gpios: calibration GPIO, which must correspond with the
        gpio used for the TDA998x interrupt pin.
 
-[1] Documentation/sound/alsa/soc/DAI.txt
+[1] Documentation/sound/soc/dai.rst
 [2] include/dt-bindings/display/tda998x.h
 
 Example:
index 039219d..18a2cde 100644 (file)
@@ -34,7 +34,7 @@ Optional properties:
 - mali-supply : Phandle to regulator for the Mali device. Refer to
   Documentation/devicetree/bindings/regulator/regulator.txt for details.
 
-- operating-points-v2 : Refer to Documentation/devicetree/bindings/power/opp.txt
+- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt
   for details.
 
 
index c1f65d1..63cd911 100644 (file)
@@ -44,7 +44,7 @@ Optional properties:
 
   - memory-region:
     Memory region to allocate from, as defined in
-    Documentation/devicetree/bindi/reserved-memory/reserved-memory.txt
+    Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 
   - mali-supply:
     Phandle to regulator for the Mali device, as defined in
index f2c30c8..9afffbd 100644 (file)
@@ -12,7 +12,7 @@ Additional documentation for F11 can be found at:
 http://www.synaptics.com/sites/default/files/511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 
 Optional Touch Properties:
-Description in Documentation/devicetree/bindings/input/touch
+Description in Documentation/devicetree/bindings/input/touchscreen
 - touchscreen-inverted-x
 - touchscreen-inverted-y
 - touchscreen-swapped-x-y
index f99fe5c..a644408 100644 (file)
@@ -28,7 +28,7 @@ Deprecated properties:
   This property is deprecated. Instead, a 'steps-per-period ' value should
   be used, such as "rotary-encoder,steps-per-period = <2>".
 
-See Documentation/input/rotary-encoder.txt for more information.
+See Documentation/input/devices/rotary-encoder.rst for more information.
 
 Example:
 
index c7888d6..880d4d7 100644 (file)
@@ -28,7 +28,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 - pinctrl-names        : a pinctrl state named tsin%d-serial or tsin%d-parallel (where %d is tsin-num)
                   must be defined for each tsin child node.
 - pinctrl-0    : phandle referencing pin configuration for this tsin configuration
-See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 
 Required properties (tsin (child) node):
index 0b2a609..5297b22 100644 (file)
@@ -46,7 +46,7 @@ is required:
        Following properties are require if pin control setting is required
        at boot.
        - pinctrl-names: A pinctrl state named "default" be defined, using the
-               bindings in pinctrl/pinctrl-binding.txt.
+               bindings in pinctrl/pinctrl-bindings.txt.
        - pinctrl[0...n]: Properties to contain the phandle that refer to
                different nodes of pin control settings. These nodes represents
                the pin control setting of state 0 to state n. Each of these
index d1df77f..0ebd08a 100644 (file)
@@ -12,7 +12,7 @@ MT6397/MT6323 is a multifunction device with the following sub modules:
 It is interfaced to host controller using SPI interface by a proprietary hardware
 called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap.
 See the following for pwarp node definitions:
-Documentation/devicetree/bindings/soc/pwrap.txt
+Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
 
 This document describes the binding for MFD device and its sub module.
 
index dd2c065..daa091c 100644 (file)
@@ -8,8 +8,8 @@ Required properties:
  - reg: The PRCM registers range
 
 The prcm node may contain several subdevices definitions:
- - see Documentation/devicetree/clk/sunxi.txt for clock devices
- - see Documentation/devicetree/reset/allwinner,sunxi-clock-reset.txt for reset
+ - see Documentation/devicetree/bindings/clock/sunxi.txt for clock devices
+ - see Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt for reset
    controller devices
 
 
index a58c173..0419a63 100644 (file)
@@ -62,7 +62,7 @@ Required properties for a slot (Deprecated - Recommend to use one slot per host)
   rest of the gpios (depending on the bus-width property) are the data lines in
   no particular order. The format of the gpio specifier depends on the gpio
   controller.
-(Deprecated - Refer to Documentation/devicetree/binding/pinctrl/samsung-pinctrl.txt)
+(Deprecated - Refer to Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt)
 
 Example:
 
index 3149297..f064528 100644 (file)
@@ -12,7 +12,7 @@ Required properties:
           See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 - pinctrl-names: A pinctrl state names "default" must be defined.
 - pinctrl-0: Phandle referencing pin configuration of the SDHCI controller.
-             See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+             See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 Example:
 
index 6b3d40c..ccf82b4 100644 (file)
@@ -20,7 +20,7 @@ Required properties:
 
 - pinctrl-names:       A pinctrl state names "default" must be defined.
 - pinctrl-0:           Phandle referencing pin configuration of the sd/emmc controller.
-                       See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+                       See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 - reg:                 This must provide the host controller base address and it can also
                        contain the FlashSS Top register for TX/RX delay used by the driver
index fd23904..a700943 100644 (file)
@@ -6,7 +6,7 @@ Required properties:
 - compatible: For external switch chips, compatible string must be exactly one
   of: "microchip,ksz9477"
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 
 Examples:
index a9bc27b..aa3527f 100644 (file)
@@ -31,7 +31,7 @@ Required properties for the child nodes within ports container:
 - phy-mode: String, must be either "trgmii" or "rgmii" for port labeled
         "cpu".
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required, optional properties and how the integrated switch subnodes must
 be specified.
 
index d5e22fc..0df79d9 100644 (file)
@@ -18,7 +18,7 @@ Optional properties:
 Data cells:
 
 Data cells are child nodes of eerpom node, bindings for which are
-documented in Documentation/bindings/nvmem/nvmem.txt
+documented in Documentation/devicetree/bindings/nvmem/nvmem.txt
 
 Example:
 
index 7bf9df0..0dcb87d 100644 (file)
@@ -3,7 +3,7 @@ HiSilicon Hip05 and Hip06 PCIe host bridge DT description
 HiSilicon PCIe host controller is based on the Synopsys DesignWare PCI core.
 It shares common functions with the PCIe DesignWare core driver and inherits
 common properties defined in
-Documentation/devicetree/bindings/pci/designware-pci.txt.
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
 
 Additional properties are described here:
 
index 6e217c6..6bbe438 100644 (file)
@@ -3,7 +3,7 @@ HiSilicon Kirin SoCs PCIe host DT description
 Kirin PCIe host controller is based on the Synopsys DesignWare PCI core.
 It shares common functions with the PCIe DesignWare core driver and
 inherits common properties defined in
-Documentation/devicetree/bindings/pci/designware-pci.txt.
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
 
 Additional properties are described here:
 
index 7e05487..3d4a209 100644 (file)
@@ -3,9 +3,9 @@ TI Keystone PCIe interface
 Keystone PCI host Controller is based on the Synopsys DesignWare PCI
 hardware version 3.65.  It shares common functions with the PCIe DesignWare
 core driver and inherits common properties defined in
-Documentation/devicetree/bindings/pci/designware-pci.txt
+Documentation/devicetree/bindings/pci/designware-pcie.txt
 
-Please refer to Documentation/devicetree/bindings/pci/designware-pci.txt
+Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
 for the details of DesignWare DT bindings.  Additional properties are
 described here as well as properties that are not applicable.
 
index ad4fce3..511fc23 100644 (file)
@@ -11,9 +11,9 @@ Optional Pinmux properties:
 --------------------------
 Following properties are required if default setting of pins are required
 at boot.
-- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>.
+- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>.
 - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per
-               <pinctrl-binding.txt>.
+               <pinctrl-bindings.txt>.
 
 The pin configurations are defined as child of the pinctrl states node. Each
 sub-node have following properties:
index a677145..625a22e 100644 (file)
@@ -101,9 +101,9 @@ Optional Pinmux properties:
 --------------------------
 Following properties are required if default setting of pins are required
 at boot.
-- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>.
+- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>.
 - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per
-               <pinctrl-binding.txt>.
+               <pinctrl-bindings.txt>.
 
 The pin configurations are defined as child of the pinctrl states node. Each
 sub-node have following properties:
index eee3dc2..cbcbd31 100644 (file)
@@ -10,9 +10,9 @@ Optional Pinmux properties:
 --------------------------
 Following properties are required if default setting of pins are required
 at boot.
-- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>.
+- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>.
 - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per
-               <pinctrl-binding.txt>.
+               <pinctrl-bindings.txt>.
 
 The pin configurations are defined as child of the pinctrl states node. Each
 sub-node have following properties:
index b31d6bb..726ec28 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
   datasheet
 - interrupts: Should contain one interrupt specifier for the GPC interrupt
 - clocks: Must contain an entry for each entry in clock-names.
-  See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+  See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
 - clock-names: Must include the following entries:
   - ipg
 
index 0ba1bcc..f181e46 100644 (file)
@@ -13,4 +13,4 @@ Required Properties:
        };
 
 For information on battery specific node, Ref:
-Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
+Documentation/devicetree/bindings/power/supply/ab8500/fg.txt
index ef53283..56636f9 100644 (file)
@@ -13,4 +13,4 @@ ab8500_chargalg {
 };
 
 For information on battery specific node, Ref:
-Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
+Documentation/devicetree/bindings/power/supply/ab8500/fg.txt
index 6bdbb08..24ada03 100644 (file)
@@ -22,4 +22,4 @@ Required Properties:
        };
 
 For information on battery specific node, Ref:
-Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
+Documentation/devicetree/bindings/power/supply/ab8500/fg.txt
index 5d254ab..cfd7465 100644 (file)
@@ -22,7 +22,7 @@ List of legacy properties and respective binding document
 3. "has-tpo"                   Documentation/devicetree/bindings/rtc/rtc-opal.txt
 4. "linux,wakeup"              Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
                                Documentation/devicetree/bindings/mfd/tc3589x.txt
-                               Documentation/devicetree/bindings/input/ads7846.txt
+                               Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
 5. "linux,keypad-wakeup"       Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
 6. "linux,input-wakeup"                Documentation/devicetree/bindings/input/samsung-keypad.txt
 7. "nvidia,wakeup-source"      Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
index 7a34345..c8dd440 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
           See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 - pinctrl-names: A pinctrl state names "default" must be defined.
 - pinctrl-0: Phandle referencing pin configuration of the UART peripheral.
-             See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+             See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 Optional properties:
 - cts-gpios: CTS pin for UART
index 4bda520..58c3413 100644 (file)
@@ -18,7 +18,7 @@ Required properties:
     See Documentation/devicetree/bindings/dma/stm32-dma.txt.
   - dma-names: Identifier for each DMA request line. Must be "tx" and "rx".
   - pinctrl-names: should contain only value "default"
-  - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+  - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
 
 Optional properties:
   - resets: Reference to a reset controller asserting the reset controller
index f301cdf..3a3fc50 100644 (file)
@@ -37,7 +37,7 @@ SAI subnodes required properties:
        "tx": if sai sub-block is configured as playback DAI
        "rx": if sai sub-block is configured as capture DAI
   - pinctrl-names: should contain only value "default"
-  - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+  - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
 
 SAI subnodes Optional properties:
   - st,sync: specify synchronization mode.
index fe54959..1bdc470 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - clocks       : Must contain an entry for each name in clock-names
                    See ../clk/*
 - pinctrl-names        : Uses "default", can use "sleep" if provided
-                   See ../pinctrl/pinctrl-binding.txt
+                   See ../pinctrl/pinctrl-bindings.txt
 
 Optional properties:
 - cs-gpios     : List of GPIO chip selects
index 50a3153..252a05c 100644 (file)
@@ -16,7 +16,7 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/rockchip,dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
 
 Example device nodes:
 
index c71a50d..aa03f38 100644 (file)
@@ -57,7 +57,7 @@ device that displays digits), an additional index argument can be specified::
                                          enum gpiod_flags flags)
 
 For a more detailed description of the con_id parameter in the DeviceTree case
-see Documentation/gpio/board.txt
+see Documentation/driver-api/gpio/board.rst
 
 The flags parameter is used to optionally specify a direction and initial value
 for the GPIO. Values can be:
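
As a hedged aside, the consumer API the hunk above documents is used
along these lines in a driver; the con_id "led" and the flag choice are
illustrative:

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    struct gpio_desc *led;

    /* Request the "led" GPIO as an output, initially driven low. */
    led = devm_gpiod_get(dev, "led", GPIOD_OUT_LOW);
    if (IS_ERR(led))
            return PTR_ERR(led);

    gpiod_set_value(led, 1);        /* drive the line to its active state */
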
index bee1b9a..6172f3c 100644 (file)
@@ -49,10 +49,10 @@ Device Drivers Base
 Device Drivers DMA Management
 -----------------------------
 
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
    :export:
 
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
    :export:
 
 Device drivers PnP support
index 9f4f87e..75865da 100644 (file)
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
 Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
 Shirish Pargaonkar (for many ACL patches over the years)
 Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
 
 
 Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
 bugs in error paths.  Valuable suggestions also have come from Al Viro
 and Dave Miller.
 
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams and SuSE and Citrix and RedHat testers for finding multiple bugs during excellent stress test runs.
index bc0025c..455e1cc 100644 (file)
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
 Version 1.62
 ------------
 Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
index c5adf14..852499a 100644 (file)
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
    - multichannel (started), integration with RDMA
-   - directory leases (improved metadata caching)
-   - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+   - directory leases (improved metadata caching), started (root dir only)
+   - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
      currently the only two server side copy mechanisms supported)
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases, currently only the root file handle is cached longer
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
 exists. Also better integration with winbind for resolving SID owners
 
 k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
 
 l) encrypted file support
 
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 secure) CIFS dialect can be disabled in environments that don't need it
 and simplify the code.
 
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
 
 KNOWN BUGS
 ====================================
@@ -92,8 +93,8 @@ Misc testing to do
 1) check out max path names and max path name components against various server
 types. Try nested symlinks (8 deep). Return max path name in stat -f information
 
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
 
 3) Additional performance testing and optimization using iozone and similar - 
 there are some easy changes that can be done to parallelize sequential writes,
index cfd31d9..72d16f0 100644 (file)
@@ -53,7 +53,7 @@ bus supply voltage.
 
 The shunt value in micro-ohms can be set via platform data or device tree at
 compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
-refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
+refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings
 if the device tree is used.
 
 Additionally ina226 supports update_interval attribute as described in
index 22208bf..cb3b0de 100644 (file)
@@ -724,8 +724,8 @@ migrate your tool to one of the following options:
 
   See following documents:
 
-  - Documentation/trace/kprobetrace.txt
-  - Documentation/trace/events.txt
+  - Documentation/trace/kprobetrace.rst
+  - Documentation/trace/events.rst
   - tools/perf/Documentation/perf-probe.txt
 
 
index a19db34..22b271d 100644 (file)
@@ -41,7 +41,7 @@ named ``char-misc-next``, you would be using the following command::
 
 that will create a signed tag called ``char-misc-4.15-rc1`` based on the
 last commit in the ``char-misc-next`` branch, and sign it with your gpg key
-(see :ref:`Documentation/maintainer/configure_git.rst <configuregit>`).
+(see :ref:`Documentation/maintainer/configure-git.rst <configuregit>`).
 
 Linus will only accept pull requests based on a signed tag. Other
 maintainers may differ.
index d23c51a..2fd0b51 100644 (file)
@@ -164,7 +164,7 @@ The Linux network devices (by default) just can handle the
 transmission and reception of media dependent frames. Due to the
 arbitration on the CAN bus the transmission of a low prio CAN-ID
 may be delayed by the reception of a high prio CAN frame. To
-reflect the correct [*]_ traffic on the node the loopback of the sent
+reflect the correct [#f1]_ traffic on the node the loopback of the sent
 data has to be performed right after a successful transmission. If
 the CAN network interface is not capable of performing the loopback for
 some reason the SocketCAN core can do this task as a fallback solution.
@@ -175,7 +175,7 @@ networking behaviour for CAN applications. Due to some requests from
 the RT-SocketCAN group the loopback optionally may be disabled for each
 separate socket. See sockopts from the CAN RAW sockets in :ref:`socketcan-raw-sockets`.
 
-.. [*] you really like to have this when you're running analyser
+.. [#f1] you really like to have this when you're running analyser
        tools like 'candump' or 'cansniffer' on the (same) node.
 
 
index 25feb0d..2019a55 100755 (executable)
@@ -53,8 +53,6 @@ from docutils.utils import SystemMessagePropagation
 # common globals
 # ==============================================================================
 
-# The version numbering follows numbering of the specification
-# (Documentation/books/kernel-doc-HOWTO).
 __version__  = '1.0'
 
 PY3 = sys.version_info[0] == 3
index 1d74ad0..efbc832 100644 (file)
@@ -426,5 +426,5 @@ root@genericarmv8:~#
 Details on how to use the generic STM API can be found here [2].
 
 [1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
-[2]. Documentation/trace/stm.txt
+[2]. Documentation/trace/stm.rst
 [3]. https://github.com/Linaro/perf-opencsd
index 1afae55..696dc69 100644 (file)
@@ -8,7 +8,7 @@ Event Tracing
 1. Introduction
 ===============
 
-Tracepoints (see Documentation/trace/tracepoints.txt) can be used
+Tracepoints (see Documentation/trace/tracepoints.rst) can be used
 without creating custom kernel modules to register probe functions
 using the event tracing infrastructure.
 
index 00283b6..1fbc698 100644 (file)
@@ -199,7 +199,7 @@ If @buf is NULL and reset is set, all functions will be enabled for tracing.
 The @buf can also be a glob expression to enable all functions that
 match a specific pattern.
 
-See Filter Commands in :file:`Documentation/trace/ftrace.txt`.
+See Filter Commands in :file:`Documentation/trace/ftrace.rst`.
 
 To just trace the schedule function:
 
index b13771c..e73bcf9 100644 (file)
@@ -7,7 +7,7 @@
 
   Histogram triggers are special event triggers that can be used to
   aggregate trace event data into histograms.  For information on
-  trace events and event triggers, see Documentation/trace/events.txt.
+  trace events and event triggers, see Documentation/trace/events.rst.
 
 
 2. Histogram Trigger Command
index 990f132..19e2d63 100644 (file)
@@ -38,7 +38,7 @@ description is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth.
 
 STH registers an stm class device, through which it provides interface
 to userspace and kernelspace software trace sources. See
-Documentation/trace/stm.txt for more information on that.
+Documentation/trace/stm.rst for more information on that.
 
 MSU can be configured to collect trace data into a system memory
 buffer, which can later on be read from its device nodes via read() or
index a4d3ff2..716326b 100644 (file)
@@ -6,7 +6,7 @@ Notes on Analysing Behaviour Using Events and Tracepoints
 1. Introduction
 ===============
 
-Tracepoints (see Documentation/trace/tracepoints.txt) can be used without
+Tracepoints (see Documentation/trace/tracepoints.rst) can be used without
 creating custom kernel modules to register probe functions using the event
 tracing infrastructure.
 
@@ -55,7 +55,7 @@ simple case of::
 3.1 System-Wide Event Enabling
 ------------------------------
 
-See Documentation/trace/events.txt for a proper description on how events
+See Documentation/trace/events.rst for a proper description on how events
 can be enabled system-wide. A short example of enabling all events related
 to page allocation would look something like::
 
@@ -112,7 +112,7 @@ at that point.
 3.4 Local Event Enabling
 ------------------------
 
-Documentation/trace/ftrace.txt describes how to enable events on a per-thread
+Documentation/trace/ftrace.rst describes how to enable events on a per-thread
 basis using set_ftrace_pid.
 
 3.5 Local Event Enablement with PCL
@@ -137,7 +137,7 @@ basis using PCL such as follows.
 4. Event Filtering
 ==================
 
-Documentation/trace/ftrace.txt covers in-depth how to filter events in
+Documentation/trace/ftrace.rst covers in-depth how to filter events in
 ftrace.  Obviously using grep and awk of trace_pipe is an option as well
 as any script reading trace_pipe.
 
index 8d7ed0c..f311638 100644 (file)
@@ -1,5 +1,5 @@
 NOTE:
-This is a version of Documentation/HOWTO translated into Japanese.
+This is a version of Documentation/process/howto.rst translated into Japanese.
 This document is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
 If you find any difference between this document and the original file or
 a problem with the translation, please contact the maintainer of this file.
@@ -109,7 +109,7 @@ linux-api@vger.kernel.org に送ることを勧めます。
     ています。 カーネルに関して初めての人はここからスタートすると良い
     でしょう。
 
-  :ref:`Documentation/Process/changes.rst <changes>`
+  :ref:`Documentation/process/changes.rst <changes>`
     このファイルはカーネルをうまく生成(訳注 build )し、走らせるのに最
     小限のレベルで必要な数々のソフトウェアパッケージの一覧を示してい
     ます。
index 624654b..a8197e0 100644 (file)
@@ -160,7 +160,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다.
     독특한 행동에 관하여 흔히 있는 오해들과 혼란들을 해소하고 있기
     때문이다.
 
-  :ref:`Documentation/process/stable_kernel_rules.rst <stable_kernel_rules>`
+  :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
     이 문서는 안정적인 커널 배포가 이루어지는 규칙을 설명하고 있으며
     여러분들이 이러한 배포들 중 하나에 변경을 하길 원한다면
     무엇을 해야 하는지를 설명한다.
index 929385e..15e7356 100644 (file)
@@ -107,7 +107,7 @@ Linux 2.6:
                程序测试的指导,请参阅
                Documentation/power/drivers-testing.txt。有关驱动程序电
                源管理问题相对全面的概述,请参阅
-               Documentation/power/admin-guide/devices.rst。
+               Documentation/driver-api/pm/devices.rst。
 
 管理:              如果一个驱动程序的作者还在进行有效的维护,那么通常除了那
                些明显正确且不需要任何检查的补丁以外,其他所有的补丁都会
index 4f8bf30..4cb1ba8 100644 (file)
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/gpio.txt
+Chinese translated version of Documentation/gpio
 
 If you have any comment or update to the content, please contact the
 original document maintainer directly.  However, if you have a problem
@@ -10,7 +10,7 @@ Maintainer: Grant Likely <grant.likely@secretlab.ca>
                Linus Walleij <linus.walleij@linaro.org>
 Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
 ---------------------------------------------------------------------
-Documentation/gpio.txt 的中文翻译
+Documentation/gpio 的中文翻译
 
 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
index e592daf..1f8127b 100644 (file)
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/io_orderings.txt
+Chinese translated version of Documentation/io_ordering.txt
 
 If you have any comment or update to the content, please contact the
 original document maintainer directly.  However, if you have a problem
index e9db693..7159cec 100644 (file)
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/magic-number.txt
+Chinese translated version of Documentation/process/magic-number.rst
 
 If you have any comment or update to the content, please post to LKML directly.
 However, if you have problem communicating in English you can also ask the
@@ -7,7 +7,7 @@ translation is outdated or there is problem with translation.
 
 Chinese maintainer: Jia Wei Wei <harryxiyou@gmail.com>
 ---------------------------------------------------------------------
-Documentation/magic-number.txt的中文翻译
+Documentation/process/magic-number.rst的中文翻译
 
 如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,也可
 以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版维护者。
index 67ffbf3..e9f2937 100644 (file)
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/video4linux/omap3isp.txt
+Chinese translated version of Documentation/media/v4l-drivers/omap3isp.rst
 
 If you have any comment or update to the content, please contact the
 original document maintainer directly.  However, if you have a problem
@@ -11,7 +11,7 @@ Maintainer: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
          David Cohen <dacohen@gmail.com>
 Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
 ---------------------------------------------------------------------
-Documentation/video4linux/omap3isp.txt 的中文翻译
+Documentation/media/v4l-drivers/omap3isp.rst 的中文翻译
 
 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
index c77c0f0..66c7c56 100644 (file)
@@ -1,4 +1,4 @@
-Chinese translated version of Documentation/video4linux/v4l2-framework.txt
+Chinese translated version of Documentation/media/media_kapi.rst
 
 If you have any comment or update to the content, please contact the
 original document maintainer directly.  However, if you have a problem
@@ -9,7 +9,7 @@ or if there is a problem with the translation.
 Maintainer: Mauro Carvalho Chehab <mchehab@kernel.org>
 Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
 ---------------------------------------------------------------------
-Documentation/video4linux/v4l2-framework.txt 的中文翻译
+Documentation/media/media_kapi.rst 的中文翻译
 
 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
@@ -777,7 +777,7 @@ v4l2 核心 API 提供了一个处理视频缓冲的标准方法(称为“videob
 线性 DMA(videobuf-dma-contig)以及大多用于 USB 设备的用 vmalloc
 分配的缓冲(videobuf-vmalloc)。
 
-请参阅 Documentation/video4linux/videobuf,以获得更多关于 videobuf
+请参阅 Documentation/media/kapi/v4l2-videobuf.rst,以获得更多关于 videobuf
 层的使用信息。
 
 v4l2_fh 结构体
index ebb3168..edf3cf5 100644 (file)
@@ -1732,7 +1732,8 @@ F:        arch/arm/mach-npcm/
 F:     arch/arm/boot/dts/nuvoton-npcm*
 F:     include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
 F:     drivers/*/*npcm*
-F:     Documentation/*/*npcm*
+F:     Documentation/devicetree/bindings/*/*npcm*
+F:     Documentation/devicetree/bindings/*/*/*npcm*
 
 ARM/NUVOTON W90X900 ARM ARCHITECTURE
 M:     Wan ZongShun <mcuos.com@gmail.com>
@@ -3079,7 +3080,7 @@ M:        Clemens Ladisch <clemens@ladisch.de>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 T:     git git://git.alsa-project.org/alsa-kernel.git
 S:     Maintained
-F:     Documentation/sound/alsa/Bt87x.txt
+F:     Documentation/sound/cards/bt87x.rst
 F:     sound/pci/bt87x.c
 
 BT8XXGPIO DRIVER
@@ -3375,7 +3376,7 @@ M:        David Howells <dhowells@redhat.com>
 M:     David Woodhouse <dwmw2@infradead.org>
 L:     keyrings@vger.kernel.org
 S:     Maintained
-F:     Documentation/module-signing.txt
+F:     Documentation/admin-guide/module-signing.rst
 F:     certs/
 F:     scripts/sign-file.c
 F:     scripts/extract-cert.c
@@ -4359,12 +4360,7 @@ L:       iommu@lists.linux-foundation.org
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 S:     Supported
-F:     lib/dma-debug.c
-F:     lib/dma-direct.c
-F:     lib/dma-noncoherent.c
-F:     lib/dma-virt.c
-F:     drivers/base/dma-mapping.c
-F:     drivers/base/dma-coherent.c
+F:     kernel/dma/
 F:     include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
@@ -4513,7 +4509,7 @@ DRM DRIVER FOR ILITEK ILI9225 PANELS
 M:     David Lechner <david@lechnology.com>
 S:     Maintained
 F:     drivers/gpu/drm/tinydrm/ili9225.c
-F:     Documentation/devicetree/bindings/display/ili9225.txt
+F:     Documentation/devicetree/bindings/display/ilitek,ili9225.txt
 
 DRM DRIVER FOR INTEL I810 VIDEO CARDS
 S:     Orphan / Obsolete
@@ -4599,13 +4595,13 @@ DRM DRIVER FOR SITRONIX ST7586 PANELS
 M:     David Lechner <david@lechnology.com>
 S:     Maintained
 F:     drivers/gpu/drm/tinydrm/st7586.c
-F:     Documentation/devicetree/bindings/display/st7586.txt
+F:     Documentation/devicetree/bindings/display/sitronix,st7586.txt
 
 DRM DRIVER FOR SITRONIX ST7735R PANELS
 M:     David Lechner <david@lechnology.com>
 S:     Maintained
 F:     drivers/gpu/drm/tinydrm/st7735r.c
-F:     Documentation/devicetree/bindings/display/st7735r.txt
+F:     Documentation/devicetree/bindings/display/sitronix,st7735r.txt
 
 DRM DRIVER FOR TDFX VIDEO CARDS
 S:     Orphan / Obsolete
@@ -4638,7 +4634,6 @@ F:        drivers/gpu/drm/
 F:     drivers/gpu/vga/
 F:     Documentation/devicetree/bindings/display/
 F:     Documentation/devicetree/bindings/gpu/
-F:     Documentation/devicetree/bindings/video/
 F:     Documentation/gpu/
 F:     include/drm/
 F:     include/uapi/drm/
@@ -4683,7 +4678,7 @@ M:        Boris Brezillon <boris.brezillon@bootlin.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 F:     drivers/gpu/drm/atmel-hlcdc/
-F:     Documentation/devicetree/bindings/drm/atmel/
+F:     Documentation/devicetree/bindings/display/atmel/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR BRIDGE CHIPS
@@ -4714,7 +4709,7 @@ S:        Supported
 F:     drivers/gpu/drm/fsl-dcu/
 F:     Documentation/devicetree/bindings/display/fsl,dcu.txt
 F:     Documentation/devicetree/bindings/display/fsl,tcon.txt
-F:     Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt
+F:     Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
 
 DRM DRIVERS FOR FREESCALE IMX
 M:     Philipp Zabel <p.zabel@pengutronix.de>
@@ -4824,7 +4819,7 @@ M:        Eric Anholt <eric@anholt.net>
 S:     Supported
 F:     drivers/gpu/drm/v3d/
 F:     include/uapi/drm/v3d_drm.h
-F:     Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt
+F:     Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR VC4
@@ -5735,7 +5730,7 @@ M:        Madalin Bucur <madalin.bucur@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/freescale/fman
-F:     Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+F:     Documentation/devicetree/bindings/net/fsl-fman.txt
 
 FREESCALE QORIQ PTP CLOCK DRIVER
 M:     Yangbo Lu <yangbo.lu@nxp.com>
@@ -6501,7 +6496,7 @@ L:        linux-mm@kvack.org
 S:     Maintained
 F:     mm/hmm*
 F:     include/linux/hmm*
-F:     Documentation/vm/hmm.txt
+F:     Documentation/vm/hmm.rst
 
 HOST AP DRIVER
 M:     Jouni Malinen <j@w1.fi>
@@ -6966,7 +6961,7 @@ IIO MULTIPLEXER
 M:     Peter Rosin <peda@axentia.se>
 L:     linux-iio@vger.kernel.org
 S:     Maintained
-F:     Documentation/devicetree/bindings/iio/multiplexer/iio-mux.txt
+F:     Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
 F:     drivers/iio/multiplexer/iio-mux.c
 
 IIO SUBSYSTEM AND DRIVERS
@@ -7401,7 +7396,7 @@ F:        drivers/platform/x86/intel-wmi-thunderbolt.c
 INTEL(R) TRACE HUB
 M:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S:     Supported
-F:     Documentation/trace/intel_th.txt
+F:     Documentation/trace/intel_th.rst
 F:     drivers/hwtracing/intel_th/
 
 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
@@ -7425,7 +7420,7 @@ M:        Linus Walleij <linus.walleij@linaro.org>
 L:     linux-iio@vger.kernel.org
 S:     Maintained
 F:     drivers/iio/gyro/mpu3050*
-F:     Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
+F:     Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
 
 IOC3 ETHERNET DRIVER
 M:     Ralf Baechle <ralf@linux-mips.org>
@@ -8700,7 +8695,7 @@ M:        Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
 S:     Maintained
 F:     Documentation/hwmon/max6697
-F:     Documentation/devicetree/bindings/i2c/max6697.txt
+F:     Documentation/devicetree/bindings/hwmon/max6697.txt
 F:     drivers/hwmon/max6697.c
 F:     include/linux/platform_data/max6697.h
 
@@ -9080,7 +9075,7 @@ M:        Martin Donnelly <martin.donnelly@ge.com>
 M:     Martyn Welch <martyn.welch@collabora.co.uk>
 S:     Maintained
 F:     drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
-F:     Documentation/devicetree/bindings/video/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
+F:     Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt
 
 MEGARAID SCSI/SAS DRIVERS
 M:     Kashyap Desai <kashyap.desai@broadcom.com>
@@ -9665,7 +9660,7 @@ F:        include/uapi/linux/mmc/
 MULTIPLEXER SUBSYSTEM
 M:     Peter Rosin <peda@axentia.se>
 S:     Maintained
-F:     Documentation/ABI/testing/mux/sysfs-class-mux*
+F:     Documentation/ABI/testing/sysfs-class-mux*
 F:     Documentation/devicetree/bindings/mux/
 F:     include/linux/dt-bindings/mux/
 F:     include/linux/mux/
@@ -9696,7 +9691,7 @@ MXSFB DRM DRIVER
 M:     Marek Vasut <marex@denx.de>
 S:     Supported
 F:     drivers/gpu/drm/mxsfb/
-F:     Documentation/devicetree/bindings/display/mxsfb-drm.txt
+F:     Documentation/devicetree/bindings/display/mxsfb.txt
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 M:     Chris Lee <christopher.lee@cspi.com>
@@ -10249,7 +10244,7 @@ F:      arch/powerpc/include/asm/pnv-ocxl.h
 F:     drivers/misc/ocxl/
 F:     include/misc/ocxl*
 F:     include/uapi/misc/ocxl.h
-F:     Documentation/accelerators/ocxl.txt
+F:     Documentation/accelerators/ocxl.rst
 
 OMAP AUDIO SUPPORT
 M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -10278,18 +10273,16 @@ F:    arch/arm/boot/dts/*am5*
 F:     arch/arm/boot/dts/*dra7*
 
 OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
-M:     Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:     linux-omap@vger.kernel.org
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/fbdev/omap2/
 F:     Documentation/arm/OMAP/DSS
 
 OMAP FRAMEBUFFER SUPPORT
-M:     Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:     linux-fbdev@vger.kernel.org
 L:     linux-omap@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/fbdev/omap/
 
 OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT
@@ -10733,7 +10726,7 @@ PARALLEL LCD/KEYPAD PANEL DRIVER
 M:     Willy Tarreau <willy@haproxy.com>
 M:     Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:     Odd Fixes
-F:     Documentation/misc-devices/lcd-panel-cgram.txt
+F:     Documentation/auxdisplay/lcd-panel-cgram.txt
 F:     drivers/misc/panel.c
 
 PARALLEL PORT SUBSYSTEM
@@ -10890,7 +10883,7 @@ M:      Will Deacon <will.deacon@arm.com>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/pci/controller-generic-pci.txt
+F:     Documentation/devicetree/bindings/pci/host-generic-pci.txt
 F:     drivers/pci/controller/pci-host-common.c
 F:     drivers/pci/controller/pci-host-generic.c
 
@@ -11071,7 +11064,7 @@ M:      Xiaowei Song <songxiaowei@hisilicon.com>
 M:     Binghui Wang <wangbinghui@hisilicon.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
-F:     Documentation/devicetree/bindings/pci/pcie-kirin.txt
+F:     Documentation/devicetree/bindings/pci/kirin-pcie.txt
 F:     drivers/pci/controller/dwc/pcie-kirin.c
 
 PCIE DRIVER FOR HISILICON STB
@@ -12462,7 +12455,7 @@ L:      linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
 F:     drivers/crypto/exynos-rng.c
-F:     Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+F:     Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
 
 SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
 M:     Łukasz Stelmach <l.stelmach@samsung.com>
@@ -13304,7 +13297,7 @@ M:      Vinod Koul <vkoul@kernel.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
 S:     Supported
-F:     Documentation/sound/alsa/compress_offload.txt
+F:     Documentation/sound/designs/compress-offload.rst
 F:     include/sound/compress_driver.h
 F:     include/uapi/sound/compress_*
 F:     sound/core/compress_offload.c
@@ -13325,7 +13318,7 @@ L:      alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://alsa-project.org/main/index.php/ASoC
 S:     Supported
 F:     Documentation/devicetree/bindings/sound/
-F:     Documentation/sound/alsa/soc/
+F:     Documentation/sound/soc/
 F:     sound/soc/
 F:     include/sound/soc*
 
@@ -13584,7 +13577,7 @@ F:      drivers/*/stm32-*timer*
 F:     drivers/pwm/pwm-stm32*
 F:     include/linux/*/stm32-*tim*
 F:     Documentation/ABI/testing/*timer-stm32
-F:     Documentation/devicetree/bindings/*/stm32-*timer
+F:     Documentation/devicetree/bindings/*/stm32-*timer*
 F:     Documentation/devicetree/bindings/pwm/pwm-stm32*
 
 STMMAC ETHERNET DRIVER
@@ -13655,7 +13648,7 @@ M:      Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
-F:     lib/swiotlb.c
+F:     kernel/dma/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
@@ -13807,7 +13800,7 @@ SYSTEM TRACE MODULE CLASS
 M:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git
-F:     Documentation/trace/stm.txt
+F:     Documentation/trace/stm.rst
 F:     drivers/hwtracing/stm/
 F:     include/linux/stm.h
 F:     include/uapi/linux/stm.h
@@ -14484,7 +14477,7 @@ M:      Steven Rostedt <rostedt@goodmis.org>
 M:     Ingo Molnar <mingo@redhat.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Maintained
-F:     Documentation/trace/ftrace.txt
+F:     Documentation/trace/ftrace.rst
 F:     arch/*/*/*/ftrace.h
 F:     arch/*/kernel/ftrace.c
 F:     include/*/ftrace.h
@@ -14953,7 +14946,7 @@ M:      Heikki Krogerus <heikki.krogerus@linux.intel.com>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-typec
-F:     Documentation/usb/typec.rst
+F:     Documentation/driver-api/usb/typec.rst
 F:     drivers/usb/typec/
 F:     include/linux/usb/typec.h
 
@@ -15582,6 +15575,13 @@ S:     Maintained
 F:     Documentation/x86/
 F:     arch/x86/
 
+X86 ENTRY CODE
+M:     Andy Lutomirski <luto@kernel.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S:     Maintained
+F:     arch/x86/entry/
+
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
 M:     Borislav Petkov <bp@alien8.de>
@@ -15604,7 +15604,7 @@ F:      drivers/platform/x86/
 F:     drivers/platform/olpc/
 
 X86 VDSO
-M:     Andy Lutomirski <luto@amacapital.net>
+M:     Andy Lutomirski <luto@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
@@ -15782,7 +15782,7 @@ YEALINK PHONE DRIVER
 M:     Henk Vergonet <Henk.Vergonet@gmail.com>
 L:     usbb2k-api-dev@nongnu.org
 S:     Maintained
-F:     Documentation/input/yealink.rst
+F:     Documentation/input/devices/yealink.rst
 F:     drivers/input/misc/yealink.*
 
 Z8530 DRIVER FOR AX.25
index 8a26b59..ca2af1a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
-PATCHLEVEL = 17
+PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index 47b235d..1aa5906 100644
@@ -403,7 +403,7 @@ config SECCOMP_FILTER
          in terms of Berkeley Packet Filter programs which implement
          task-defined system call filtering polices.
 
-         See Documentation/prctl/seccomp_filter.txt for details.
+         See Documentation/userspace-api/seccomp_filter.rst for details.
 
 preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC))
 
index 483d285..54eeb8d 100644
@@ -1302,7 +1302,7 @@ config SMP
          will run faster if you say N here.
 
          See also <file:Documentation/x86/i386/IO-APIC.txt>,
-         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
          <http://tldp.org/HOWTO/SMP-HOWTO.html>.
 
          If you don't know what to do here, say N.
index 869080b..ec1a5fd 100644
@@ -35,7 +35,7 @@
  *     Start addresses are inclusive and end addresses are exclusive;
  *     start addresses should be rounded down, end addresses up.
  *
- *     See Documentation/cachetlb.txt for more information.
+ *     See Documentation/core-api/cachetlb.rst for more information.
  *     Please note that the implementation of these, and the required
  *     effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
index 0094c66..d264a72 100644
@@ -36,7 +36,7 @@
  *     Start addresses are inclusive and end addresses are exclusive; start
  *     addresses should be rounded down, end addresses up.
  *
- *     See Documentation/cachetlb.txt for more information. Please note that
+ *     See Documentation/core-api/cachetlb.rst for more information. Please note that
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *     VIPT I-cache.
  *
index ffea82a..b091de7 100644
@@ -19,7 +19,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 
-/* Look at Documentation/cachetlb.txt */
+/* Look at Documentation/core-api/cachetlb.rst */
 
 /*
  * Cache handling functions.
index 4d8f64d..c480770 100644
@@ -275,7 +275,7 @@ config SMP
          machines, but will use only one CPU of a multiprocessor machine.
          On a uniprocessor machine, the kernel will run faster if you say N.
 
-         See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+         See also <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO
          available at <http://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 0563fd3..480bb02 100644
@@ -6,36 +6,38 @@
 
 struct css_general_char {
        u64 : 12;
-       u32 dynio : 1;   /* bit 12 */
-       u32 : 4;
-       u32 eadm : 1;    /* bit 17 */
-       u32 : 23;
-       u32 aif : 1;     /* bit 41 */
-       u32 : 3;
-       u32 mcss : 1;    /* bit 45 */
-       u32 fcs : 1;     /* bit 46 */
-       u32 : 1;
-       u32 ext_mb : 1;  /* bit 48 */
-       u32 : 7;
-       u32 aif_tdd : 1; /* bit 56 */
-       u32 : 1;
-       u32 qebsm : 1;   /* bit 58 */
-       u32 : 2;
-       u32 aiv : 1;     /* bit 61 */
-       u32 : 5;
-       u32 aif_osa : 1; /* bit 67 */
-       u32 : 12;
-       u32 eadm_rf : 1; /* bit 80 */
-       u32 : 1;
-       u32 cib : 1;     /* bit 82 */
-       u32 : 5;
-       u32 fcx : 1;     /* bit 88 */
-       u32 : 19;
-       u32 alt_ssi : 1; /* bit 108 */
-       u32 : 1;
-       u32 narf : 1;    /* bit 110 */
-       u32 : 12;
-       u32 util_str : 1;/* bit 123 */
+       u64 dynio : 1;   /* bit 12 */
+       u64 : 4;
+       u64 eadm : 1;    /* bit 17 */
+       u64 : 23;
+       u64 aif : 1;     /* bit 41 */
+       u64 : 3;
+       u64 mcss : 1;    /* bit 45 */
+       u64 fcs : 1;     /* bit 46 */
+       u64 : 1;
+       u64 ext_mb : 1;  /* bit 48 */
+       u64 : 7;
+       u64 aif_tdd : 1; /* bit 56 */
+       u64 : 1;
+       u64 qebsm : 1;   /* bit 58 */
+       u64 : 2;
+       u64 aiv : 1;     /* bit 61 */
+       u64 : 2;
+
+       u64 : 3;
+       u64 aif_osa : 1; /* bit 67 */
+       u64 : 12;
+       u64 eadm_rf : 1; /* bit 80 */
+       u64 : 1;
+       u64 cib : 1;     /* bit 82 */
+       u64 : 5;
+       u64 fcx : 1;     /* bit 88 */
+       u64 : 19;
+       u64 alt_ssi : 1; /* bit 108 */
+       u64 : 1;
+       u64 narf : 1;    /* bit 110 */
+       u64 : 12;
+       u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
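
The hunk above retypes every member of the __packed css_general_char bit-field structure from u32 to u64 and splits the anonymous filler between aiv (bit 61) and aif_osa (bit 67) into 2 + 3 bits, with the break at bit 64 — the boundary between the two 64-bit words the structure describes. A bit-field's declared type sets the allocation unit it is packed into, so keeping every field u64 lines the declared bit numbers up with those words and keeps any single field from straddling a word boundary. A user-space demo of the allocation-unit effect (illustration only, not kernel code; the sizes are what GCC typically produces):

    /* Three 20-bit fields: no 32-bit unit can hold two of them, but all
     * three fit into a single 64-bit unit. */
    #include <stdio.h>
    #include <stdint.h>

    struct u32_units { uint32_t a : 20, b : 20, c : 20; };
    struct u64_units { uint64_t a : 20, b : 20, c : 20; };

    int main(void)
    {
        printf("u32 units: %zu bytes\n", sizeof(struct u32_units)); /* typically 12 */
        printf("u64 units: %zu bytes\n", sizeof(struct u64_units)); /* typically 8 */
        return 0;
    }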
index 4bedd1c..dd4f3d3 100644
@@ -687,7 +687,7 @@ config SMP
          People using multiprocessor machines who say Y here should also say
          Y to "Enhanced Real Time Clock Support", below.
 
-         See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+         See also <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO
          available at <http://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 9a2b887..0f535de 100644
@@ -178,7 +178,7 @@ config SMP
          Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
          Management" code will be disabled if you say Y here.
 
-         See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+         See also <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO
          available at <http://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 3e7f228..20da5a8 100644
@@ -80,7 +80,7 @@ config MAGIC_SYSRQ
          On UML, this is accomplished by sending a "sysrq" command with
          mconsole, followed by the letter for the requested command.
 
-         The keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+         The keys are documented in <file:Documentation/admin-guide/sysrq.rst>. Don't say Y
          unless you really know what this hack does.
 
 config KERNEL_STACK_ORDER
index 1d9132b..1c8b9f1 100644
@@ -33,7 +33,7 @@
  *     Start addresses are inclusive and end addresses are exclusive;
  *     start addresses should be rounded down, end addresses up.
  *
- *     See Documentation/cachetlb.txt for more information.
+ *     See Documentation/core-api/cachetlb.rst for more information.
  *     Please note that the implementation of these, and the required
  *     effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
index 7782cdb..82ed001 100644
@@ -201,7 +201,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        /*
         * Handle seccomp.  regs->ip must be the original value.
-        * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+        * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
         *
         * We could optimize the seccomp disabled case, but performance
         * here doesn't matter.
index 397d6a1..a0d50be 100644
@@ -88,7 +88,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
  *
  * Pages can get remapped. Because this might change the 'color' of that page,
  * we have to flush the cache before the PTE is changed.
- * (see also Documentation/cachetlb.txt)
+ * (see also Documentation/core-api/cachetlb.rst)
  */
 
 #if defined(CONFIG_MMU) && \
@@ -152,7 +152,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
                __invalidate_icache_range(start,(end) - (start));       \
        } while (0)
 
-/* This is not required, see Documentation/cachetlb.txt */
+/* This is not required, see Documentation/core-api/cachetlb.rst */
 #define        flush_icache_page(vma,page)                     do { } while (0)
 
 #define flush_dcache_mmap_lock(mapping)                        do { } while (0)
index 28ec557..eb50fd4 100644
@@ -114,7 +114,7 @@ config BLK_DEV_THROTTLING
        one needs to mount and use blkio cgroup controller for creating
        cgroups and specifying per device IO rate policies.
 
-       See Documentation/cgroups/blkio-controller.txt for more information.
+       See Documentation/cgroup-v1/blkio-controller.txt for more information.
 
 config BLK_DEV_THROTTLING_LOW
        bool "Block throttling .low limit interface support (EXPERIMENTAL)"
index 70356a2..09b2ee6 100644
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
-                        int (fn)(void *, struct request *))
-{
-       int i, j, ret = 0;
-
-       if (WARN_ON_ONCE(!fn))
-               goto out;
-
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               struct blk_mq_tags *tags = set->tags[i];
-
-               if (!tags)
-                       continue;
-
-               for (j = 0; j < tags->nr_tags; j++) {
-                       if (!tags->static_rqs[j])
-                               continue;
-
-                       ret = fn(data, tags->static_rqs[j]);
-                       if (ret)
-                               goto out;
-               }
-       }
-
-out:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
-
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
 {
index e9da5e6..70c65bb 100644
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
        if (blk_mq_request_started(rq)) {
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+               rq->rq_flags &= ~RQF_TIMED_OUT;
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+       req->rq_flags |= RQF_TIMED_OUT;
        if (req->q->mq_ops->timeout) {
                enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
 
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
 }
 
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
        if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
                return false;
+       if (rq->rq_flags & RQF_TIMED_OUT)
+               return false;
 
        deadline = blk_rq_deadline(rq);
        if (time_after_eq(jiffies, deadline))
@@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
        mutex_lock(&set->tag_list_lock);
        list_del_rcu(&q->tag_set_list);
-       INIT_LIST_HEAD(&q->tag_set_list);
        if (list_is_singular(&set->tag_list)) {
                /* just transitioned to unshared */
                set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
                blk_mq_update_tag_set_depth(set, false);
        }
        mutex_unlock(&set->tag_list_lock);
-
        synchronize_rcu();
+       INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
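
Three related fixes land in blk-mq.c above: requeueing a started request clears RQF_TIMED_OUT, the timeout path sets that flag around the driver ->timeout() call and clears it again before re-arming the timer, and blk_mq_req_expired() refuses to expire a request whose flag is still set — so one deadline cannot fire a second time while its handler is running. Separately, q->tag_set_list is now reinitialized only after synchronize_rcu(), so RCU readers still walking the tag-set list never see the head reset out from under them. The guard-flag idea, reduced to a compilable toy (F_TIMED_OUT, req_expired and req_timed_out are made-up names):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_TIMED_OUT 0x1u

    struct req {
        unsigned int flags;
        unsigned long deadline;
    };

    static bool req_expired(const struct req *rq, unsigned long now)
    {
        if (rq->flags & F_TIMED_OUT)    /* handler in flight: not again */
            return false;
        return now >= rq->deadline;
    }

    static void req_timed_out(struct req *rq, unsigned long now)
    {
        rq->flags |= F_TIMED_OUT;       /* block re-expiry while we work */
        rq->deadline = now + 30;        /* "driver" asked to reset the timer */
        rq->flags &= ~F_TIMED_OUT;      /* re-armed: scans may see it again */
    }

    int main(void)
    {
        struct req rq = { .flags = 0, .deadline = 10 };

        if (req_expired(&rq, 15))
            req_timed_out(&rq, 15);
        printf("deadline=%lu expired=%d\n", rq.deadline, req_expired(&rq, 15));
        return 0;
    }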
index 24b20d8..fbc153a 100644
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-       INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
-       list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q:  the request queue for the device
- *
- *  Description:
- *   Hardware conditions may dictate a need to stop all pending requests.
- *   In this case, we will safely clear the block side of the tag queue and
- *   readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-       struct list_head *tmp, *n;
-
-       lockdep_assert_held(q->queue_lock);
-
-       list_for_each_safe(tmp, n, &q->tag_busy_list)
-               blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
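
With no remaining in-tree callers, blk_queue_invalidate_tags() is removed, and the per-queue tag_busy_list bookkeeping behind it goes with it. Its loop was built on list_for_each_safe(), the idiom for traversing a list whose current node the body may unlink. The same lookahead trick on a plain singly-linked list (toy code, hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int v; struct node *next; };

    int main(void)
    {
        struct node *head = NULL, *tmp, **pp;

        for (int i = 0; i < 5; i++) {           /* build 4,3,2,1,0 */
            struct node *n = malloc(sizeof(*n));
            n->v = i;
            n->next = head;
            head = n;
        }

        pp = &head;
        for (struct node *n = head; n; n = tmp) {
            tmp = n->next;                      /* save before a possible free */
            if (n->v % 2) {                     /* unlink odd values */
                *pp = tmp;
                free(n);
            } else {
                pp = &n->next;
            }
        }

        for (struct node *n = head; n; n = n->next)
            printf("%d ", n->v);                /* prints: 4 2 0 */
        printf("\n");
        return 0;
    }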
index 132e657..66602c4 100644
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
        struct bsg_device *bd;
        unsigned char buf[32];
 
+       lockdep_assert_held(&bsg_mutex);
+
        if (!blk_get_queue(rq))
                return ERR_PTR(-ENXIO);
 
@@ -707,14 +709,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
        bsg_set_block(bd, file);
 
        atomic_set(&bd->ref_count, 1);
-       mutex_lock(&bsg_mutex);
        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
        strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
        bsg_dbg(bd, "bound to <%s>, max queue %d\n",
                format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
-       mutex_unlock(&bsg_mutex);
        return bd;
 }
 
@@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
        struct bsg_device *bd;
 
-       mutex_lock(&bsg_mutex);
+       lockdep_assert_held(&bsg_mutex);
 
        hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
                if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
        }
        bd = NULL;
 found:
-       mutex_unlock(&bsg_mutex);
        return bd;
 }
 
@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
         */
        mutex_lock(&bsg_mutex);
        bcd = idr_find(&bsg_minor_idr, iminor(inode));
-       mutex_unlock(&bsg_mutex);
 
-       if (!bcd)
-               return ERR_PTR(-ENODEV);
+       if (!bcd) {
+               bd = ERR_PTR(-ENODEV);
+               goto out_unlock;
+       }
 
        bd = __bsg_get_device(iminor(inode), bcd->queue);
-       if (bd)
-               return bd;
-
-       bd = bsg_add_device(inode, bcd->queue, file);
+       if (!bd)
+               bd = bsg_add_device(inode, bcd->queue, file);
 
+out_unlock:
+       mutex_unlock(&bsg_mutex);
        return bd;
 }
 
index 5f7663d..c94e93d 100644
@@ -13,7 +13,7 @@ config MODULE_SIG_KEY
 
          If this option is unchanged from its default "certs/signing_key.pem",
          then the kernel will automatically generate the private key and
-         certificate as described in Documentation/module-signing.txt
+         certificate as described in Documentation/admin-guide/module-signing.rst
 
 config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
index 39aecad..26539e9 100644
@@ -1,6 +1,6 @@
 /* Asymmetric public-key cryptography key type
  *
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index 11b7ba1..2819831 100644
@@ -1,6 +1,6 @@
 /* Signature verification with an asymmetric key
  *
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index b074f24..704f442 100644
@@ -8,10 +8,7 @@ obj-y                  := component.o core.o bus.o dd.o syscore.o \
                           topology.o container.o property.o cacheinfo.o \
                           devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
-obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-y                          += firmware_loader/
 obj-$(CONFIG_NUMA)     += node.o
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
deleted file mode 100644
index 597d408..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Coherent per-device memory handling.
- * Borrowed from i386
- */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-struct dma_coherent_mem {
-       void            *virt_base;
-       dma_addr_t      device_base;
-       unsigned long   pfn_base;
-       int             size;
-       int             flags;
-       unsigned long   *bitmap;
-       spinlock_t      spinlock;
-       bool            use_dev_dma_pfn_offset;
-};
-
-static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
-
-static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
-{
-       if (dev && dev->dma_mem)
-               return dev->dma_mem;
-       return NULL;
-}
-
-static inline dma_addr_t dma_get_device_base(struct device *dev,
-                                            struct dma_coherent_mem * mem)
-{
-       if (mem->use_dev_dma_pfn_offset)
-               return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
-       else
-               return mem->device_base;
-}
-
-static int dma_init_coherent_memory(
-       phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-       struct dma_coherent_mem **mem)
-{
-       struct dma_coherent_mem *dma_mem = NULL;
-       void __iomem *mem_base = NULL;
-       int pages = size >> PAGE_SHIFT;
-       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-       int ret;
-
-       if (!size) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       mem_base = memremap(phys_addr, size, MEMREMAP_WC);
-       if (!mem_base) {
-               ret = -EINVAL;
-               goto out;
-       }
-       dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-       if (!dma_mem) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-       if (!dma_mem->bitmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       dma_mem->virt_base = mem_base;
-       dma_mem->device_base = device_addr;
-       dma_mem->pfn_base = PFN_DOWN(phys_addr);
-       dma_mem->size = pages;
-       dma_mem->flags = flags;
-       spin_lock_init(&dma_mem->spinlock);
-
-       *mem = dma_mem;
-       return 0;
-
-out:
-       kfree(dma_mem);
-       if (mem_base)
-               memunmap(mem_base);
-       return ret;
-}
-
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
-{
-       if (!mem)
-               return;
-
-       memunmap(mem->virt_base);
-       kfree(mem->bitmap);
-       kfree(mem);
-}
-
-static int dma_assign_coherent_memory(struct device *dev,
-                                     struct dma_coherent_mem *mem)
-{
-       if (!dev)
-               return -ENODEV;
-
-       if (dev->dma_mem)
-               return -EBUSY;
-
-       dev->dma_mem = mem;
-       return 0;
-}
-
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                               dma_addr_t device_addr, size_t size, int flags)
-{
-       struct dma_coherent_mem *mem;
-       int ret;
-
-       ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
-       if (ret)
-               return ret;
-
-       ret = dma_assign_coherent_memory(dev, mem);
-       if (ret)
-               dma_release_coherent_memory(mem);
-       return ret;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-
-       if (!mem)
-               return;
-       dma_release_coherent_memory(mem);
-       dev->dma_mem = NULL;
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                       dma_addr_t device_addr, size_t size)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-       unsigned long flags;
-       int pos, err;
-
-       size += device_addr & ~PAGE_MASK;
-
-       if (!mem)
-               return ERR_PTR(-EINVAL);
-
-       spin_lock_irqsave(&mem->spinlock, flags);
-       pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-       err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-
-       if (err != 0)
-               return ERR_PTR(err);
-       return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-               ssize_t size, dma_addr_t *dma_handle)
-{
-       int order = get_order(size);
-       unsigned long flags;
-       int pageno;
-       void *ret;
-
-       spin_lock_irqsave(&mem->spinlock, flags);
-
-       if (unlikely(size > (mem->size << PAGE_SHIFT)))
-               goto err;
-
-       pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-       if (unlikely(pageno < 0))
-               goto err;
-
-       /*
-        * Memory was found in the coherent area.
-        */
-       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-       ret = mem->virt_base + (pageno << PAGE_SHIFT);
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-       memset(ret, 0, size);
-       return ret;
-err:
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-       return NULL;
-}
-
-/**
- * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
- * @dev:       device from which we allocate memory
- * @size:      size of requested memory area
- * @dma_handle:        This will be filled with the correct dma handle
- * @ret:       This pointer will be filled with the virtual address
- *             to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-               dma_addr_t *dma_handle, void **ret)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       if (!mem)
-               return 0;
-
-       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
-       if (*ret)
-               return 1;
-
-       /*
-        * In the case where the allocation can not be satisfied from the
-        * per-device area, try to fall back to generic memory if the
-        * constraints allow it.
-        */
-       return mem->flags & DMA_MEMORY_EXCLUSIVE;
-}
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
-{
-       if (!dma_coherent_default_memory)
-               return NULL;
-
-       return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-                       dma_handle);
-}
-
-static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
-                                      int order, void *vaddr)
-{
-       if (mem && vaddr >= mem->virt_base && vaddr <
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-               unsigned long flags;
-
-               spin_lock_irqsave(&mem->spinlock, flags);
-               bitmap_release_region(mem->bitmap, page, order);
-               spin_unlock_irqrestore(&mem->spinlock, flags);
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dma_release_from_dev_coherent() - free memory to device coherent memory pool
- * @dev:       device from which the memory was allocated
- * @order:     the order of pages allocated
- * @vaddr:     virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if the caller should
- * proceed with releasing memory from generic pools.
- */
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       return __dma_release_from_coherent(mem, order, vaddr);
-}
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
-
-int dma_release_from_global_coherent(int order, void *vaddr)
-{
-       if (!dma_coherent_default_memory)
-               return 0;
-
-       return __dma_release_from_coherent(dma_coherent_default_memory, order,
-                       vaddr);
-}
-
-static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
-               struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
-{
-       if (mem && vaddr >= mem->virt_base && vaddr + size <=
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               unsigned long off = vma->vm_pgoff;
-               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-               int user_count = vma_pages(vma);
-               int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-               *ret = -ENXIO;
-               if (off < count && user_count <= count - off) {
-                       unsigned long pfn = mem->pfn_base + start + off;
-                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
-                                              user_count << PAGE_SHIFT,
-                                              vma->vm_page_prot);
-               }
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
- * @dev:       device from which the memory was allocated
- * @vma:       vm_area for the userspace memory
- * @vaddr:     cpu address returned by dma_alloc_from_dev_coherent
- * @size:      size of the memory buffer allocated
- * @ret:       result from remap_pfn_range()
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
- *
- * Returns 1 if @vaddr belongs to the device coherent pool and the caller
- * should return @ret, or 0 if they should proceed with mapping memory from
- * generic areas.
- */
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
-                          void *vaddr, size_t size, int *ret)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
-}
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
-
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
-                                  size_t size, int *ret)
-{
-       if (!dma_coherent_default_memory)
-               return 0;
-
-       return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
-                                       vaddr, size, ret);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static struct reserved_mem *dma_reserved_default_memory __initdata;
-
-static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-       struct dma_coherent_mem *mem = rmem->priv;
-       int ret;
-
-       if (!mem) {
-               ret = dma_init_coherent_memory(rmem->base, rmem->base,
-                                              rmem->size,
-                                              DMA_MEMORY_EXCLUSIVE, &mem);
-               if (ret) {
-                       pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-                       return ret;
-               }
-       }
-       mem->use_dev_dma_pfn_offset = true;
-       rmem->priv = mem;
-       dma_assign_coherent_memory(dev, mem);
-       return 0;
-}
-
-static void rmem_dma_device_release(struct reserved_mem *rmem,
-                                   struct device *dev)
-{
-       if (dev)
-               dev->dma_mem = NULL;
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
-       .device_init    = rmem_dma_device_init,
-       .device_release = rmem_dma_device_release,
-};
-
-static int __init rmem_dma_setup(struct reserved_mem *rmem)
-{
-       unsigned long node = rmem->fdt_node;
-
-       if (of_get_flat_dt_prop(node, "reusable", NULL))
-               return -EINVAL;
-
-#ifdef CONFIG_ARM
-       if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
-               pr_err("Reserved memory: regions without no-map are not yet supported\n");
-               return -EINVAL;
-       }
-
-       if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
-               WARN(dma_reserved_default_memory,
-                    "Reserved memory: region for default DMA coherent area is redefined\n");
-               dma_reserved_default_memory = rmem;
-       }
-#endif
-
-       rmem->ops = &rmem_dma_ops;
-       pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
-               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-       return 0;
-}
-
-static int __init dma_init_reserved_memory(void)
-{
-       const struct reserved_mem_ops *ops;
-       int ret;
-
-       if (!dma_reserved_default_memory)
-               return -ENOMEM;
-
-       ops = dma_reserved_default_memory->ops;
-
-       /*
-        * We rely on rmem_dma_device_init() does not propagate error of
-        * dma_assign_coherent_memory() for "NULL" device.
-        */
-       ret = ops->device_init(dma_reserved_default_memory, NULL);
-
-       if (!ret) {
-               dma_coherent_default_memory = dma_reserved_default_memory->priv;
-               pr_info("DMA: default coherent area is set\n");
-       }
-
-       return ret;
-}
-
-core_initcall(dma_init_reserved_memory);
-
-RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
-#endif
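
The deleted dma-coherent.c (its logic continues under kernel/dma/, per the MAINTAINERS hunk earlier) tracked each per-device pool with one bit per page and carved out naturally aligned power-of-two regions through bitmap_find_free_region()/bitmap_allocate_region(). A toy version of that bitmap pattern (find_free_region here is a made-up stand-in, not the kernel API):

    #include <stdio.h>

    #define POOL_PAGES 32
    static unsigned long pool_bitmap;   /* bit i set => page i in use */

    static int find_free_region(int order)
    {
        int pages = 1 << order;
        unsigned long mask = (1UL << pages) - 1;

        /* scan naturally aligned runs of 2^order pages */
        for (int pos = 0; pos + pages <= POOL_PAGES; pos += pages)
            if (!(pool_bitmap & (mask << pos))) {
                pool_bitmap |= mask << pos;     /* claim the run */
                return pos;
            }
        return -1;
    }

    int main(void)
    {
        printf("order-2 region at page %d\n", find_free_region(2)); /* 0 */
        printf("order-0 region at page %d\n", find_free_region(0)); /* 4 */
        return 0;
    }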
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
deleted file mode 100644 (file)
index d987dcd..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- *     Marek Szyprowski <m.szyprowski@samsung.com>
- *     Michal Nazarewicz <mina86@mina86.com>
- */
-
-#define pr_fmt(fmt) "cma: " fmt
-
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-#  define DEBUG
-#endif
-#endif
-
-#include <asm/page.h>
-#include <asm/dma-contiguous.h>
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/sizes.h>
-#include <linux/dma-contiguous.h>
-#include <linux/cma.h>
-
-#ifdef CONFIG_CMA_SIZE_MBYTES
-#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
-#else
-#define CMA_SIZE_MBYTES 0
-#endif
-
-struct cma *dma_contiguous_default_area;
-
-/*
- * Default global CMA area size can be defined in kernel's .config.
- * This is useful mainly for distro maintainers to create a kernel
- * that works correctly for most supported systems.
- * The size can be set in bytes or as a percentage of the total memory
- * in the system.
- *
- * Users, who want to set the size of global CMA area for their system
- * should use cma= kernel parameter.
- */
-static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
-static phys_addr_t size_cmdline = -1;
-static phys_addr_t base_cmdline;
-static phys_addr_t limit_cmdline;
-
-static int __init early_cma(char *p)
-{
-       pr_debug("%s(%s)\n", __func__, p);
-       size_cmdline = memparse(p, &p);
-       if (*p != '@')
-               return 0;
-       base_cmdline = memparse(p + 1, &p);
-       if (*p != '-') {
-               limit_cmdline = base_cmdline + size_cmdline;
-               return 0;
-       }
-       limit_cmdline = memparse(p + 1, &p);
-
-       return 0;
-}
-early_param("cma", early_cma);
-
-#ifdef CONFIG_CMA_SIZE_PERCENTAGE
-
-static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
-{
-       struct memblock_region *reg;
-       unsigned long total_pages = 0;
-
-       /*
-        * We cannot use memblock_phys_mem_size() here, because
-        * memblock_analyze() has not been called yet.
-        */
-       for_each_memblock(memory, reg)
-               total_pages += memblock_region_memory_end_pfn(reg) -
-                              memblock_region_memory_base_pfn(reg);
-
-       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
-}
-
-#else
-
-static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
-{
-       return 0;
-}
-
-#endif
-
-/**
- * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
-       phys_addr_t selected_size = 0;
-       phys_addr_t selected_base = 0;
-       phys_addr_t selected_limit = limit;
-       bool fixed = false;
-
-       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
-       if (size_cmdline != -1) {
-               selected_size = size_cmdline;
-               selected_base = base_cmdline;
-               selected_limit = min_not_zero(limit_cmdline, limit);
-               if (base_cmdline + size_cmdline == limit_cmdline)
-                       fixed = true;
-       } else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
-               selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
-               selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
-               selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
-               selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
-       }
-
-       if (selected_size && !dma_contiguous_default_area) {
-               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-                        (unsigned long)selected_size / SZ_1M);
-
-               dma_contiguous_reserve_area(selected_size, selected_base,
-                                           selected_limit,
-                                           &dma_contiguous_default_area,
-                                           fixed);
-       }
-}
-
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. This function allows to create custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base.  If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-                                      phys_addr_t limit, struct cma **res_cma,
-                                      bool fixed)
-{
-       int ret;
-
-       ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
-                                       "reserved", res_cma);
-       if (ret)
-               return ret;
-
-       /* Architecture specific contiguous memory fixup. */
-       dma_contiguous_early_fixup(cma_get_base(*res_cma),
-                               cma_get_size(*res_cma));
-
-       return 0;
-}
-
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev:   Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask: GFP flags to use for this allocation.
- *
- * This function allocates memory buffer for specified device. It uses
- * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-                                      unsigned int align, gfp_t gfp_mask)
-{
-       if (align > CONFIG_CMA_ALIGNMENT)
-               align = CONFIG_CMA_ALIGNMENT;
-
-       return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
-}
-
-/**
- * dma_release_from_contiguous() - release allocated pages
- * @dev:   Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
- *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
- */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-                                int count)
-{
-       return cma_release(dev_get_cma_area(dev), pages, count);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-#undef pr_fmt
-#define pr_fmt(fmt) fmt
-
-static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-       dev_set_cma_area(dev, rmem->priv);
-       return 0;
-}
-
-static void rmem_cma_device_release(struct reserved_mem *rmem,
-                                   struct device *dev)
-{
-       dev_set_cma_area(dev, NULL);
-}
-
-static const struct reserved_mem_ops rmem_cma_ops = {
-       .device_init    = rmem_cma_device_init,
-       .device_release = rmem_cma_device_release,
-};
-
-static int __init rmem_cma_setup(struct reserved_mem *rmem)
-{
-       phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-       phys_addr_t mask = align - 1;
-       unsigned long node = rmem->fdt_node;
-       struct cma *cma;
-       int err;
-
-       if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
-           of_get_flat_dt_prop(node, "no-map", NULL))
-               return -EINVAL;
-
-       if ((rmem->base & mask) || (rmem->size & mask)) {
-               pr_err("Reserved memory: incorrect alignment of CMA region\n");
-               return -EINVAL;
-       }
-
-       err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
-       if (err) {
-               pr_err("Reserved memory: unable to setup CMA region\n");
-               return err;
-       }
-       /* Architecture specific contiguous memory fixup. */
-       dma_contiguous_early_fixup(rmem->base, rmem->size);
-
-       if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
-               dma_contiguous_set_default(cma);
-
-       rmem->ops = &rmem_cma_ops;
-       rmem->priv = cma;
-
-       pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
-               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-
-       return 0;
-}
-RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
-#endif
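
dma-contiguous.c (likewise relocated to kernel/dma/) parsed its cma= parameter with memparse(), which reads a number with an optional size suffix and leaves the end pointer on the first unconsumed character — that is how early_cma() above walks "size[@base[-limit]]". A cut-down user-space flavor (my_memparse is a made-up name and handles only K/M/G):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long my_memparse(const char *s, char **end)
    {
        unsigned long long v = strtoull(s, end, 0);

        switch (**end) {
        case 'G': case 'g': v <<= 10;   /* fall through */
        case 'M': case 'm': v <<= 10;   /* fall through */
        case 'K': case 'k': v <<= 10; (*end)++;
        }
        return v;
    }

    int main(void)
    {
        char *p;
        unsigned long long size = my_memparse("64M@0x10000000", &p);

        printf("size=%llu next='%c'\n", size, *p); /* 67108864, '@' */
        return 0;
    }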
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
deleted file mode 100644 (file)
index f831a58..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
- *
- * Copyright (c) 2006  SUSE Linux Products GmbH
- * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
- */
-
-#include <linux/acpi.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-/*
- * Managed DMA API
- */
-struct dma_devres {
-       size_t          size;
-       void            *vaddr;
-       dma_addr_t      dma_handle;
-       unsigned long   attrs;
-};
-
-static void dmam_release(struct device *dev, void *res)
-{
-       struct dma_devres *this = res;
-
-       dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
-                       this->attrs);
-}
-
-static int dmam_match(struct device *dev, void *res, void *match_data)
-{
-       struct dma_devres *this = res, *match = match_data;
-
-       if (this->vaddr == match->vaddr) {
-               WARN_ON(this->size != match->size ||
-                       this->dma_handle != match->dma_handle);
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent().  Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t gfp)
-{
-       struct dma_devres *dr;
-       void *vaddr;
-
-       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-       if (!dr)
-               return NULL;
-
-       vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-       if (!vaddr) {
-               devres_free(dr);
-               return NULL;
-       }
-
-       dr->vaddr = vaddr;
-       dr->dma_handle = *dma_handle;
-       dr->size = size;
-
-       devres_add(dev, dr);
-
-       return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_coherent()
- * @dev: Device to free coherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_coherent().
- */
-void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-                       dma_addr_t dma_handle)
-{
-       struct dma_devres match_data = { size, vaddr, dma_handle };
-
-       dma_free_coherent(dev, size, vaddr, dma_handle);
-       WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
-}
-EXPORT_SYMBOL(dmam_free_coherent);
-
-/**
- * dmam_alloc_attrs - Managed dma_alloc_attrs()
- * @dev: Device to allocate non_coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @attrs: Flags in the DMA_ATTR_* namespace.
- *
- * Managed dma_alloc_attrs().  Memory allocated using this function will be
- * automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       struct dma_devres *dr;
-       void *vaddr;
-
-       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-       if (!dr)
-               return NULL;
-
-       vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
-       if (!vaddr) {
-               devres_free(dr);
-               return NULL;
-       }
-
-       dr->vaddr = vaddr;
-       dr->dma_handle = *dma_handle;
-       dr->size = size;
-       dr->attrs = attrs;
-
-       devres_add(dev, dr);
-
-       return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_attrs);
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-       dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                                dma_addr_t device_addr, size_t size, int flags)
-{
-       void *res;
-       int rc;
-
-       res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-       if (!res)
-               return -ENOMEM;
-
-       rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-                                        flags);
-       if (!rc)
-               devres_add(dev, res);
-       else
-               devres_free(res);
-
-       return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-       WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                void *cpu_addr, dma_addr_t handle, size_t size)
-{
-       struct page *page = virt_to_page(cpu_addr);
-       int ret;
-
-       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-       if (unlikely(ret))
-               return ret;
-
-       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return 0;
-}
-EXPORT_SYMBOL(dma_common_get_sgtable);
-
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       int ret = -ENXIO;
-#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long off = vma->vm_pgoff;
-
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (off < count && user_count <= (count - off))
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
-
-       return ret;
-}
-EXPORT_SYMBOL(dma_common_mmap);
-
-#ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-                       size_t size, unsigned long vm_flags, pgprot_t prot,
-                       const void *caller)
-{
-       struct vm_struct *area;
-
-       area = get_vm_area_caller(size, vm_flags, caller);
-       if (!area)
-               return NULL;
-
-       if (map_vm_area(area, prot, pages)) {
-               vunmap(area->addr);
-               return NULL;
-       }
-
-       return area;
-}
-
-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
-                       unsigned long vm_flags, pgprot_t prot,
-                       const void *caller)
-{
-       struct vm_struct *area;
-
-       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-       if (!area)
-               return NULL;
-
-       area->pages = pages;
-
-       return area->addr;
-}
-
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-                       unsigned long vm_flags,
-                       pgprot_t prot, const void *caller)
-{
-       int i;
-       struct page **pages;
-       struct vm_struct *area;
-
-       pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
-       if (!pages)
-               return NULL;
-
-       for (i = 0; i < (size >> PAGE_SHIFT); i++)
-               pages[i] = nth_page(page, i);
-
-       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-
-       kfree(pages);
-
-       if (!area)
-               return NULL;
-       return area->addr;
-}
-
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
-{
-       struct vm_struct *area = find_vm_area(cpu_addr);
-
-       if (!area || (area->flags & vm_flags) != vm_flags) {
-               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-               return;
-       }
-
-       unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
-       vunmap(cpu_addr);
-}
-#endif
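
A sketch of how the remap/free pair above is typically used by arch DMA code; VM_MAP is only a stand-in for whatever arch-specific vm_flags the real callers pass, and pgprot_noncached() is assumed to be provided by the architecture:

        #include <linux/dma-mapping.h>
        #include <linux/vmalloc.h>

        /* Hedged sketch: flags and protections vary per architecture. */
        static void *foo_remap(struct page *page, size_t size)
        {
                return dma_common_contiguous_remap(page, size, VM_MAP,
                                                   pgprot_noncached(PAGE_KERNEL),
                                                   __builtin_return_address(0));
        }

        static void foo_unmap(void *vaddr, size_t size)
        {
                /* vm_flags must match what the mapping was created with. */
                dma_common_free_remap(vaddr, size, VM_MAP);
        }
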
-
-/*
- * enables DMA API use for a device
- */
-int dma_configure(struct device *dev)
-{
-       if (dev->bus->dma_configure)
-               return dev->bus->dma_configure(dev);
-       return 0;
-}
-
-void dma_deconfigure(struct device *dev)
-{
-       of_dma_deconfigure(dev);
-       acpi_dma_deconfigure(dev);
-}
index 410c30c..212f447 100644 (file)
@@ -81,7 +81,7 @@ config PRINTER
          corresponding drivers into the kernel.
 
          To compile this driver as a module, choose M here and read
-         <file:Documentation/parport.txt>.  The module will be called lp.
+         <file:Documentation/admin-guide/parport.rst>.  The module will be called lp.
 
          If you have several parallel ports, you can specify which ports to
          use with the "lp" kernel command line option.  (Try "man bootparam"
index a24a6af..9760b52 100644 (file)
@@ -6,7 +6,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * Standard functionality for the common clock API.  See Documentation/clk.txt
+ * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
  */
 
 #include <linux/clk.h>
@@ -2747,7 +2747,7 @@ static int __clk_core_init(struct clk_core *core)
                goto out;
        }
 
-       /* check that clk_ops are sane.  See Documentation/clk.txt */
+       /* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
        if (core->ops->set_rate &&
            !((core->ops->round_rate || core->ops->determine_rate) &&
              core->ops->recalc_rate)) {
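
The rule enforced here: an ops table that implements .set_rate must also provide .recalc_rate and at least one of .round_rate/.determine_rate. A minimal table that passes the check, with invented foo_clk_* callbacks:

        #include <linux/clk-provider.h>

        /* Hedged sketch; only the combination of ops matters here. */
        static unsigned long foo_clk_recalc_rate(struct clk_hw *hw,
                                                 unsigned long parent_rate);
        static long foo_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                       unsigned long *parent_rate);
        static int foo_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long parent_rate);

        static const struct clk_ops foo_clk_ops = {
                .recalc_rate    = foo_clk_recalc_rate,
                .round_rate     = foo_clk_round_rate,
                .set_rate       = foo_clk_set_rate,
        };
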
index 5421923..502bcbb 100644 (file)
@@ -194,7 +194,7 @@ struct ingenic_cgu {
 
 /**
  * struct ingenic_clk - private data for a clock
- * @hw: see Documentation/clk.txt
+ * @hw: see Documentation/driver-api/clk.rst
  * @cgu: a pointer to the CGU data
  * @idx: the index of this clock in cgu->clock_info
  */
index b451354..08ba847 100644 (file)
@@ -38,7 +38,7 @@
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * See Documentation/dmaengine.txt for more details
+ * See Documentation/driver-api/dmaengine for more details
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 951b6c7..624a11c 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,         0444, DMI_PRODUCT_SKU);
 DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
        ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
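
The attribute added here surfaces as /sys/class/dmi/id/product_sku, alongside the existing product_* files and with the 0444 mode set above. A small userspace sketch reading it:

        #include <stdio.h>

        int main(void)
        {
                char sku[128];
                FILE *f = fopen("/sys/class/dmi/id/product_sku", "r");

                if (!f)
                        return 1;       /* attribute absent on older kernels */
                if (fgets(sku, sizeof(sku), f))
                        printf("SKU: %s", sku);
                fclose(f);
                return 0;
        }
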
index 54e66ad..f248354 100644 (file)
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
                dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
index c987c82..0426d66 100644 (file)
@@ -2,7 +2,6 @@ config DRM_SHMOBILE
        tristate "DRM Support for SH Mobile"
        depends on DRM && ARM
        depends on ARCH_SHMOBILE || COMPILE_TEST
-       depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
        select BACKLIGHT_CLASS_DEVICE
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
index e773893..40df888 100644 (file)
@@ -21,8 +21,6 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_plane_helper.h>
 
-#include <video/sh_mobile_meram.h>
-
 #include "shmob_drm_backlight.h"
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
@@ -47,20 +45,12 @@ static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
                if (ret < 0)
                        return ret;
        }
-#if 0
-       if (sdev->meram_dev && sdev->meram_dev->pdev)
-               pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
-#endif
 
        return 0;
 }
 
 static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
 {
-#if 0
-       if (sdev->meram_dev && sdev->meram_dev->pdev)
-               pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
-#endif
        if (sdev->clock)
                clk_disable_unprepare(sdev->clock);
 }
@@ -269,12 +259,6 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
        if (!scrtc->started)
                return;
 
-       /* Disable the MERAM cache. */
-       if (scrtc->cache) {
-               sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
-               scrtc->cache = NULL;
-       }
-
        /* Stop the LCDC. */
        shmob_drm_crtc_start_stop(scrtc, false);
 
@@ -305,7 +289,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
 {
        struct drm_crtc *crtc = &scrtc->crtc;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
        struct drm_gem_cma_object *gem;
        unsigned int bpp;
 
@@ -321,11 +304,6 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
                              + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
                              + x * (bpp == 16 ? 2 : 1);
        }
-
-       if (scrtc->cache)
-               sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
-                                            scrtc->dma[0], scrtc->dma[1],
-                                            &scrtc->dma[0], &scrtc->dma[1]);
 }
 
 static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
@@ -372,9 +350,7 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
 {
        struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
        struct shmob_drm_device *sdev = crtc->dev->dev_private;
-       const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
        const struct shmob_drm_format_info *format;
-       void *cache;
 
        format = shmob_drm_format_info(crtc->primary->fb->format->format);
        if (format == NULL) {
@@ -386,24 +362,6 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
        scrtc->format = format;
        scrtc->line_size = crtc->primary->fb->pitches[0];
 
-       if (sdev->meram) {
-               /* Enable MERAM cache if configured. We need to de-init
-                * configured ICBs before we can re-initialize them.
-                */
-               if (scrtc->cache) {
-                       sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
-                       scrtc->cache = NULL;
-               }
-
-               cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
-                                                   crtc->primary->fb->pitches[0],
-                                                   adjusted_mode->vdisplay,
-                                                   format->meram,
-                                                   &scrtc->line_size);
-               if (!IS_ERR(cache))
-                       scrtc->cache = cache;
-       }
-
        shmob_drm_crtc_compute_base(scrtc, x, y);
 
        return 0;
index f152973..c11f421 100644 (file)
@@ -28,7 +28,6 @@ struct shmob_drm_crtc {
        int dpms;
 
        const struct shmob_drm_format_info *format;
-       void *cache;
        unsigned long dma[2];
        unsigned int line_size;
        bool started;
index 02ea315..088a6e5 100644 (file)
@@ -23,7 +23,6 @@
 struct clk;
 struct device;
 struct drm_device;
-struct sh_mobile_meram_info;
 
 struct shmob_drm_device {
        struct device *dev;
@@ -31,7 +30,6 @@ struct shmob_drm_device {
 
        void __iomem *mmio;
        struct clk *clock;
-       struct sh_mobile_meram_info *meram;
        u32 lddckr;
        u32 ldmt1r;
 
index d36919b..4476385 100644 (file)
@@ -18,8 +18,6 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 
-#include <video/sh_mobile_meram.h>
-
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
@@ -35,55 +33,46 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
                .bpp = 16,
                .yuv = false,
                .lddfr = LDDFR_PKF_RGB16,
-               .meram = SH_MOBILE_MERAM_PF_RGB,
        }, {
                .fourcc = DRM_FORMAT_RGB888,
                .bpp = 24,
                .yuv = false,
                .lddfr = LDDFR_PKF_RGB24,
-               .meram = SH_MOBILE_MERAM_PF_RGB,
        }, {
                .fourcc = DRM_FORMAT_ARGB8888,
                .bpp = 32,
                .yuv = false,
                .lddfr = LDDFR_PKF_ARGB32,
-               .meram = SH_MOBILE_MERAM_PF_RGB,
        }, {
                .fourcc = DRM_FORMAT_NV12,
                .bpp = 12,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_420,
-               .meram = SH_MOBILE_MERAM_PF_NV,
        }, {
                .fourcc = DRM_FORMAT_NV21,
                .bpp = 12,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_420,
-               .meram = SH_MOBILE_MERAM_PF_NV,
        }, {
                .fourcc = DRM_FORMAT_NV16,
                .bpp = 16,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_422,
-               .meram = SH_MOBILE_MERAM_PF_NV,
        }, {
                .fourcc = DRM_FORMAT_NV61,
                .bpp = 16,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_422,
-               .meram = SH_MOBILE_MERAM_PF_NV,
        }, {
                .fourcc = DRM_FORMAT_NV24,
                .bpp = 24,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_444,
-               .meram = SH_MOBILE_MERAM_PF_NV24,
        }, {
                .fourcc = DRM_FORMAT_NV42,
                .bpp = 24,
                .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_444,
-               .meram = SH_MOBILE_MERAM_PF_NV24,
        },
 };
 
index 06d5b7c..753e281 100644 (file)
@@ -24,7 +24,6 @@ struct shmob_drm_format_info {
        unsigned int bpp;
        bool yuv;
        u32 lddfr;
-       unsigned int meram;
 };
 
 const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
index 97f6e4a..1d0359f 100644 (file)
@@ -17,8 +17,6 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
-#include <video/sh_mobile_meram.h>
-
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
 #include "shmob_drm_plane.h"
index 29437ea..b677e5d 100644 (file)
@@ -6,7 +6,7 @@ config VGA_ARB
          Some "legacy" VGA devices implemented on PCI typically have the same
          hard-decoded addresses as they did on ISA. When multiple PCI devices
          are accessed at same time they need some kind of coordination. Please
-         see Documentation/vgaarbiter.txt for more details. Select this to
+         see Documentation/gpu/vgaarbiter.rst for more details. Select this to
          enable VGA arbiter.
 
 config VGA_ARB_MAX_GPUS
index 1c5e74c..c61b045 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * vgaarb.c: Implements the VGA arbitration. For details refer to
- * Documentation/vgaarbiter.txt
+ * Documentation/gpu/vgaarbiter.rst
  *
  *
  * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
index 7b8e17b..6bf4da7 100644 (file)
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, hammer_devices);
index a85634f..c7981dd 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE        0x5028
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index cb86cc8..0422ec2 100644 (file)
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
 
 static int steam_client_ll_parse(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_parse_report(hdev, steam->hdev->dev_rdesc,
                        steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
 
 static int steam_client_ll_open(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
        int ret;
 
        ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
 
 static void steam_client_ll_close(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
                                size_t count, unsigned char report_type,
                                int reqtype)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
                        report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
                ret = PTR_ERR(steam->client_hdev);
                goto client_hdev_fail;
        }
-       hid_set_drvdata(steam->client_hdev, steam);
+       steam->client_hdev->driver_data = steam;
 
        /*
         * With the real steam controller interface, do not connect hidraw.
index 582e449..a2c53ea 100644 (file)
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
        kfree(ishtp_dev);
 }
 
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
 
 /* 50ms to get resume response */
 #define WAIT_FOR_RESUME_ACK_MS         50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
  *
  * Return: 0 to the pm core
  */
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
        return 0;
 }
 
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
 /**
  * ish_resume() - ISH resume callback
  * @device:    device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
  *
  * Return: 0 to the pm core
  */
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
        return 0;
 }
 
-static const struct dev_pm_ops ish_pm_ops = {
-       .suspend = ish_suspend,
-       .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS       (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS       NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
 
 static struct pci_driver ish_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ish_pci_tbl,
        .probe = ish_probe,
        .remove = ish_remove,
-       .driver.pm = ISHTP_ISH_PM_OPS,
+       .driver.pm = &ish_pm_ops,
 };
 
 module_pci_driver(ish_driver);
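
For context on the conversion above: SIMPLE_DEV_PM_OPS() expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is off, so the callbacks are tagged __maybe_unused rather than wrapped in #ifdef CONFIG_PM. The generic shape of the pattern, with invented bar_* names:

        #include <linux/pm.h>

        static int __maybe_unused bar_suspend(struct device *dev)
        {
                /* quiesce the hardware */
                return 0;
        }

        static int __maybe_unused bar_resume(struct device *dev)
        {
                /* bring the hardware back up */
                return 0;
        }

        /* Compiles to an all-NULL ops table without CONFIG_PM_SLEEP. */
        static SIMPLE_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume);
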
index 0108c59..e50d8fe 100644 (file)
@@ -14,7 +14,7 @@ config USB_HID
 
          You can't use this driver and the HIDBP (Boot Protocol) keyboard
          and mouse drivers at the same time. More information is available:
-         <file:Documentation/input/input.txt>.
+         <file:Documentation/input/input.rst>.
 
          If unsure, say Y.
 
index c101369..d679753 100644 (file)
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
                }
        }
 
+       /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+       if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+           hdev->product == 0x0358 &&
+           WACOM_PEN_FIELD(field) &&
+           wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+               field->logical_maximum = 43200;
+       }
+
        switch (usage->hid) {
        case HID_GD_X:
                features->x_max = field->logical_maximum;
index ff80377..c5992cd 100644 (file)
@@ -16,7 +16,7 @@ config INPUT
 
          Say N here if you have a headless (no monitor, no keyboard) system.
 
-         More information is available: <file:Documentation/input/input.txt>
+         More information is available: <file:Documentation/input/input.rst>
 
          If unsure, say Y.
 
@@ -144,7 +144,7 @@ config INPUT_JOYDEV
 
          If unsure, say Y.
 
-         More information is available: <file:Documentation/input/joystick.txt>
+         More information is available: <file:Documentation/input/joydev/joystick.rst>
 
          To compile this driver as a module, choose M here: the
          module will be called joydev.
index 9591fc0..d8f9c6e 100644 (file)
@@ -9,7 +9,7 @@ menuconfig INPUT_JOYSTICK
          and the list of supported devices will be displayed. This option
          doesn't affect the kernel.
 
-         Please read the file <file:Documentation/input/joystick.txt> which
+         Please read the file <file:Documentation/input/joydev/joystick.rst> which
          contains more information.
 
 if INPUT_JOYSTICK
@@ -25,7 +25,7 @@ config JOYSTICK_ANALOG
          Flightstick Pro, ThrustMaster FCS, 6 and 8 button gamepads, or
          Saitek Cyborg joysticks.
 
-         Please read the file <file:Documentation/input/joystick.txt> which
+         Please read the file <file:Documentation/input/joydev/joystick.rst> which
          contains more information.
 
          To compile this driver as a module, choose M here: the
@@ -214,7 +214,7 @@ config JOYSTICK_DB9
          gamepad, Sega Saturn gamepad, or a Multisystem -- Atari, Amiga,
          Commodore, Amstrad CPC joystick connected to your parallel port.
          For more information on how to use the driver please read
-         <file:Documentation/input/joystick-parport.txt>.
+         <file:Documentation/input/devices/joystick-parport.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called db9.
@@ -229,7 +229,7 @@ config JOYSTICK_GAMECON
          Sony PlayStation gamepad or a Multisystem -- Atari, Amiga,
          Commodore, Amstrad CPC joystick connected to your parallel port.
          For more information on how to use the driver please read
-         <file:Documentation/input/joystick-parport.txt>.
+         <file:Documentation/input/devices/joystick-parport.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called gamecon.
@@ -241,7 +241,7 @@ config JOYSTICK_TURBOGRAFX
          Say Y here if you have the TurboGraFX interface by Steffen Schwenke,
          and want to use it with Multisystem -- Atari, Amiga, Commodore,
          Amstrad CPC joystick. For more information on how to use the driver
-         please read <file:Documentation/input/joystick-parport.txt>.
+         please read <file:Documentation/input/devices/joystick-parport.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called turbografx.
@@ -287,7 +287,7 @@ config JOYSTICK_XPAD
          and/or "Event interface support" (CONFIG_INPUT_EVDEV) as well.
 
          For information about how to connect the X-Box pad to USB, see
-         <file:Documentation/input/xpad.txt>.
+         <file:Documentation/input/devices/xpad.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called xpad.
@@ -313,7 +313,7 @@ config JOYSTICK_WALKERA0701
          Say Y or M here if you have a Walkera WK-0701 transmitter which is
          supplied with a ready to fly Walkera helicopters such as HM36,
          HM37, HM60 and want to use it via parport as a joystick. More
-         information is available: <file:Documentation/input/walkera0701.txt>
+         information is available: <file:Documentation/input/devices/walkera0701.rst>
 
          To compile this driver as a module, choose M here: the
          module will be called walkera0701.
index 8fde22a..ab4dbcb 100644 (file)
@@ -27,6 +27,6 @@ config JOYSTICK_IFORCE_232
          connected to your serial (COM) port.
 
          You will need an additional utility called inputattach, see
-         <file:Documentation/input/joystick.txt>
-         and <file:Documentation/input/ff.txt>.
+         <file:Documentation/input/joydev/joystick.rst>
+         and <file:Documentation/input/ff.rst>.
 
index 36a5b93..dce313d 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (c) 2008 Peter Popovec
  *
- *  More about driver:  <file:Documentation/input/walkera0701.txt>
+ *  More about driver:  <file:Documentation/input/devices/walkera0701.rst>
  */
 
 /*
index 572b15f..c25606e 100644 (file)
@@ -411,7 +411,7 @@ config INPUT_YEALINK
          usb sound driver, so you might want to enable that as well.
 
          For information about how to use these additional functions, see
-         <file:Documentation/input/yealink.txt>.
+         <file:Documentation/input/devices/yealink.rst>.
 
          To compile this driver as a module, choose M here: the module will be
          called yealink.
@@ -595,7 +595,7 @@ config INPUT_GPIO_ROTARY_ENCODER
        depends on GPIOLIB || COMPILE_TEST
        help
          Say Y here to add support for rotary encoders connected to GPIO lines.
-         Check file:Documentation/input/rotary-encoder.txt for more
+         Check file:Documentation/input/devices/rotary-encoder.rst for more
          information.
 
          To compile this driver as a module, choose M here: the
index 6d30438..30ec77a 100644 (file)
@@ -7,7 +7,7 @@
  * state machine code inspired by code from Tim Ruetz
  *
  * A generic driver for rotary encoders connected to GPIO lines.
- * See file:Documentation/input/rotary-encoder.txt for more information
+ * See file:Documentation/input/devices/rotary-encoder.rst for more information
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index f27f23f..566a1e3 100644 (file)
@@ -129,7 +129,7 @@ config MOUSE_PS2_ELANTECH
 
          This driver exposes some configuration registers via sysfs
          entries. For further information,
-         see <file:Documentation/input/elantech.txt>.
+         see <file:Documentation/input/devices/elantech.rst>.
 
          If unsure, say N.
 
@@ -228,7 +228,7 @@ config MOUSE_APPLETOUCH
          scrolling in X11.
 
          For further information, see
-         <file:Documentation/input/appletouch.txt>.
+         <file:Documentation/input/devices/appletouch.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called appletouch.
@@ -251,7 +251,7 @@ config MOUSE_BCM5974
 
          The interface is currently identical to the appletouch interface,
          for further information, see
-         <file:Documentation/input/appletouch.txt>.
+         <file:Documentation/input/devices/appletouch.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called bcm5974.
index cb55797..0a6f7ca 100644 (file)
@@ -212,7 +212,7 @@ static void alps_set_abs_params_v7(struct alps_data *priv,
 static void alps_set_abs_params_ss4_v2(struct alps_data *priv,
                                       struct input_dev *dev1);
 
-/* Packet formats are described in Documentation/input/alps.txt */
+/* Packet formats are described in Documentation/input/devices/alps.rst */
 
 static bool alps_is_valid_first_byte(struct alps_data *priv,
                                     unsigned char data)
index ca4530e..d90d9f1 100644 (file)
@@ -47,7 +47,7 @@ config SERIO_SERPORT
          Say Y here if you plan to use an input device (mouse, joystick,
          tablet, 6dof) that communicates over the RS232 serial (COM) port.
 
-         More information is available: <file:Documentation/input/input.txt>
+         More information is available: <file:Documentation/input/input.rst>
 
          If unsure, say Y.
 
@@ -78,7 +78,7 @@ config SERIO_PARKBD
          Say Y here if you built a simple parallel port adapter to attach
          an additional AT keyboard, XT keyboard or PS/2 mouse.
 
-         More information is available: <file:Documentation/input/input.txt>
+         More information is available: <file:Documentation/input/input.rst>
 
          If unsure, say N.
 
index fd714ee..2566b4d 100644 (file)
@@ -68,7 +68,7 @@
  * The default values correspond to Mainstone II in QVGA mode
  *
  * Please read
- * Documentation/input/input-programming.txt for more details.
+ * Documentation/input/input-programming.rst for more details.
  */
 
 static int abs_x[3] = {150, 4000, 5};
index 00cd1f2..55e9442 100644 (file)
@@ -38,7 +38,7 @@ void pblk_rb_data_free(struct pblk_rb *rb)
 /*
  * Initialize ring buffer. The data and metadata buffers must be previously
  * allocated and their size must be a power of two
- * (Documentation/circular-buffers.txt)
+ * (Documentation/core-api/circular-buffers.rst)
  */
 int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
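
The power-of-two requirement lets ring-buffer positions wrap with a mask instead of a modulo. A generic sketch, not pblk's actual code:

        /* With a power-of-two size, wrapping a free-running position
         * needs only a bitwise AND, not a division. */
        #define RB_SIZE         (1U << 10)      /* must be a power of two */
        #define RB_MASK         (RB_SIZE - 1)

        static inline unsigned int rb_slot(unsigned int pos)
        {
                return pos & RB_MASK;           /* same as pos % RB_SIZE */
        }
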
index 4d20088..17bf109 100644 (file)
@@ -5,7 +5,7 @@ config BCACHE
        Allows a block device to be used as cache for other devices; uses
        a btree for indexing and the layout is optimized for SSDs.
 
-       See Documentation/bcache.txt for details.
+       See Documentation/admin-guide/bcache.rst for details.
 
 config BCACHE_DEBUG
        bool "Bcache debugging"
index 2a0968c..547c9ee 100644 (file)
@@ -18,7 +18,7 @@
  * as keys are inserted we only sort the pages that have not yet been written.
  * When garbage collection is run, we resort the entire node.
  *
- * All configuration is done via sysfs; see Documentation/bcache.txt.
+ * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
  */
 
 #include "bcache.h"
index c334e66..1d09674 100644 (file)
@@ -18,7 +18,7 @@
  * as keys are inserted we only sort the pages that have not yet been written.
  * When garbage collection is run, we resort the entire node.
  *
- * All configuration is done via sysfs; see Documentation/bcache.txt.
+ * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
  */
 
 #include "bcache.h"
index 4330b6f..d1d471a 100644 (file)
@@ -55,7 +55,7 @@ int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
         * this pairs with smp_store_release() in dvb_ringbuffer_write(),
         * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
         *
-        * for memory barriers also see Documentation/circular-buffers.txt
+        * for memory barriers also see Documentation/core-api/circular-buffers.rst
         */
        return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
 }
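
The pairing referenced in this comment: the writer stores data before publishing the write index with smp_store_release(), and the reader loads the index with smp_load_acquire() before touching the data. A condensed sketch of the idiom, not the dvb code itself; it assumes one producer, one consumer, and a power-of-two buffer:

        #include <linux/atomic.h>       /* smp_load_acquire()/smp_store_release() */
        #include <linux/types.h>

        #define RB_SIZE 256
        #define RB_MASK (RB_SIZE - 1)

        struct rb {
                u8 data[RB_SIZE];
                unsigned int pread, pwrite;
        };

        static void rb_put(struct rb *rb, u8 byte)
        {
                rb->data[rb->pwrite & RB_MASK] = byte;          /* data first... */
                smp_store_release(&rb->pwrite, rb->pwrite + 1); /* ...then index */
        }

        static bool rb_get(struct rb *rb, u8 *byte)
        {
                if (rb->pread == smp_load_acquire(&rb->pwrite))
                        return false;                           /* empty */
                *byte = rb->data[rb->pread & RB_MASK];  /* pairs with release */
                rb->pread++;
                return true;
        }
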
index 55e36a4..9ecaa9d 100644 (file)
@@ -324,7 +324,7 @@ config DVB_SP8870
          A DVB-T tuner module. Say Y when you want to support this frontend.
 
          This driver needs external firmware. Please use the command
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware sp8870" to
+         "<kerneldir>/scripts/get_dvb_firmware sp8870" to
          download/extract it, and then copy it to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
@@ -336,7 +336,7 @@ config DVB_SP887X
          A DVB-T tuner module. Say Y when you want to support this frontend.
 
          This driver needs external firmware. Please use the command
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware sp887x" to
+         "<kerneldir>/scripts/get_dvb_firmware sp887x" to
          download/extract it, and then copy it to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
@@ -387,8 +387,8 @@ config DVB_TDA1004X
          A DVB-T tuner module. Say Y when you want to support this frontend.
 
          This driver needs external firmware. Please use the commands
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
+         "<kerneldir>/scripts/get_dvb_firmware tda10045",
+         "<kerneldir>/scripts/get_dvb_firmware tda10046" to
          download/extract them, and then copy them to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
@@ -591,8 +591,8 @@ config DVB_NXT200X
          to support this frontend.
 
          This driver needs external firmware. Please use the commands
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" and
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to
+         "<kerneldir>/scripts/get_dvb_firmware nxt2002" and
+         "<kerneldir>/scripts/get_dvb_firmware nxt2004" to
          download/extract them, and then copy them to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
@@ -604,7 +604,7 @@ config DVB_OR51211
          An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
 
          This driver needs external firmware. Please use the command
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
+         "<kerneldir>/scripts/get_dvb_firmware or51211" to
          download it, and then copy it to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
@@ -617,8 +617,8 @@ config DVB_OR51132
          to support this frontend.
 
          This driver needs external firmware. Please use the commands
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_vsb" and/or
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_qam" to
+         "<kerneldir>/scripts/get_dvb_firmware or51132_vsb" and/or
+         "<kerneldir>/scripts/get_dvb_firmware or51132_qam" to
          download firmwares for 8VSB and QAM64/256, respectively. Copy them to
          /usr/lib/hotplug/firmware or /lib/firmware (depending on
          configuration of firmware hotplug).
index d5dfafb..d2b7523 100644 (file)
@@ -17,7 +17,7 @@
  *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  *
  */
 
index de3ce27..5861f34 100644 (file)
@@ -17,7 +17,7 @@
  *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  *
  */
 
index c983f2f..30f067f 100644 (file)
@@ -6,7 +6,7 @@
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation, version 2.
 *
-* see Documentation/dvb/README.dvb-usb for more information
+* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
 */
 
 #ifndef EDS1547
index 7aa7440..a6cc495 100644 (file)
@@ -27,8 +27,8 @@
  *   ATI HDTV Wonder (NXT2004)
  *
  * This driver needs external firmware. Please use the command
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" or
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to
+ * "<kerneldir>/scripts/get_dvb_firmware nxt2002" or
+ * "<kerneldir>/scripts/get_dvb_firmware nxt2004" to
  * download/extract the appropriate firmware, and then copy it to
  * /usr/lib/hotplug/firmware/ or /lib/firmware/
  * (depending on configuration of firmware hotplug).
index a1b7c30..b65ba34 100644 (file)
@@ -22,7 +22,7 @@
 
 /*
  * This driver needs external firmware. Please use the command
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
+ * "<kerneldir>/scripts/get_dvb_firmware or51211" to
  * download/extract it, and then copy it to /usr/lib/hotplug/firmware
  * or /lib/firmware (depending on configuration of firmware hotplug).
  */
index 9a726f3..1d57a20 100644 (file)
@@ -21,7 +21,7 @@
 */
 /*
  * This driver needs external firmware. Please use the command
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware alps_tdlb7" to
+ * "<kerneldir>/scripts/get_dvb_firmware alps_tdlb7" to
  * download/extract it, and then copy it to /usr/lib/hotplug/firmware
  * or /lib/firmware (depending on configuration of firmware hotplug).
  */
index f39d566..57a0d0a 100644 (file)
@@ -4,7 +4,7 @@
 
 /*
  * This driver needs external firmware. Please use the command
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware sp887x" to
+ * "<kerneldir>/scripts/get_dvb_firmware sp887x" to
  * download/extract it, and then copy it to /usr/lib/hotplug/firmware
  * or /lib/firmware (depending on configuration of firmware hotplug).
  */
index 58e3bef..7dcfb4a 100644 (file)
@@ -21,8 +21,8 @@
    */
 /*
  * This driver needs external firmware. Please use the commands
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
- * "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
+ * "<kerneldir>/scripts/get_dvb_firmware tda10045",
+ * "<kerneldir>/scripts/get_dvb_firmware tda10046" to
  * download/extract them, and then copy them to /usr/lib/hotplug/firmware
  * or /lib/firmware (depending on configuration of firmware hotplug).
  */
index a59f4fd..1ed67c0 100644 (file)
@@ -852,7 +852,7 @@ static int tda10071_init(struct dvb_frontend *fe)
                ret = request_firmware(&fw, fw_file, &client->dev);
                if (ret) {
                        dev_err(&client->dev,
-                               "did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
+                               "did not find the firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware\n",
                                fw_file, ret);
                        goto error;
                }
index 96d86d6..0871c1a 100644 (file)
@@ -6,7 +6,7 @@
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation, version 2.
 *
-* see Documentation/dvb/README.dvb-usb for more information
+* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
 */
 
 #ifndef Z0194A
index 87cba15..008a082 100644 (file)
@@ -1202,7 +1202,7 @@ static const struct v4l2_ctrl_ops max2175_ctrl_ops = {
 
 /*
  * I2S output enable/disable configuration. This is a private control.
- * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
  */
 static const struct v4l2_ctrl_config max2175_i2s_en = {
        .ops = &max2175_ctrl_ops,
@@ -1218,7 +1218,7 @@ static const struct v4l2_ctrl_config max2175_i2s_en = {
 
 /*
  * HSLS value control LO freq adjacent location configuration.
- * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
  */
 static const struct v4l2_ctrl_config max2175_hsls = {
        .ops = &max2175_ctrl_ops,
@@ -1234,7 +1234,7 @@ static const struct v4l2_ctrl_config max2175_hsls = {
 /*
  * Rx modes below are a set of preset configurations that decides the tuner's
  * sck and sample rate of transmission. They are separate for EU & NA regions.
- * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ * Refer to Documentation/media/v4l-drivers/max2175.rst for more details.
  */
 static const char * const max2175_ctrl_eu_rx_modes[] = {
        [MAX2175_EU_FM_1_2]     = "EU FM 1.2",
index 4a93f6d..bc89e37 100644 (file)
@@ -16,7 +16,7 @@ config VIDEO_BT848
        ---help---
          Support for BT848 based frame grabber/overlay boards. This includes
          the Miro, Hauppauge and STB boards. Please read the material in
-         <file:Documentation/video4linux/bttv/> for more information.
+         <file:Documentation/media/v4l-drivers/bttv.rst> for more information.
 
          To compile this driver as a module, choose M here: the
          module will be called bttv.
index 010f39e..a3a7f70 100644 (file)
@@ -152,7 +152,7 @@ static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
 
        if (ret) {
                CX18_ERR("The MPC718 board variant with the MT352 DVB-T demodulator will not work without it\n");
-               CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware mpc718' if you need the firmware\n");
+               CX18_ERR("Run 'linux/scripts/get_dvb_firmware mpc718' if you need the firmware\n");
        }
        return ret;
 }
index a594cfd..b36f4ce 100644 (file)
@@ -853,7 +853,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 
                /*
                 * Audio related reset according to
-                * Documentation/video4linux/cx2341x/fw-encoder-api.txt
+                * Documentation/media/v4l-drivers/cx2341x.rst
                 */
                if (atomic_read(&cx->ana_capturing) == 0)
                        cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
@@ -861,7 +861,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 
                /*
                 * Number of lines for Field 1 & Field 2 according to
-                * Documentation/video4linux/cx2341x/fw-encoder-api.txt
+                * Documentation/media/v4l-drivers/cx2341x.rst
                 * Field 1 is 312 for 625 line systems in BT.656
                 * Field 2 is 313 for 625 line systems in BT.656
                 */
index 3a1c551..9f50748 100644 (file)
@@ -2426,7 +2426,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 
                ret = request_firmware(&fw, filename, &dev->pci->dev);
                if (ret != 0)
-                       pr_err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+                       pr_err("did not find the firmware file '%s'. You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware.",
                               filename);
                else
                        altera_init(&netup_config, fw);
index 2e60334..9a50f54 100644 (file)
@@ -5,7 +5,7 @@ config VIDEO_MEYE
        ---help---
          This is the video4linux driver for the Motion Eye camera found
          in the Vaio Picturebook laptops. Please read the material in
-         <file:Documentation/video4linux/meye.txt> for more information.
+         <file:Documentation/media/v4l-drivers/meye.rst> for more information.
 
          If you say Y or M here, you need to say Y or M to "Sony Laptop
          Extras" in the misc device section.
index 7b83151..dfba74d 100644 (file)
@@ -24,7 +24,7 @@ config DVB_AV7110
          onboard MPEG2 decoder.
 
          This driver needs an external firmware. Please use the script
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware av7110" to
+         "<kerneldir>/scripts/get_dvb_firmware av7110" to
          download/extract it, and then copy it to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
index 4d5a26b..d85ffbf 100644 (file)
@@ -1021,7 +1021,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
  *  - a videobuffer is queued on the pcdev->capture list
  *
  * Please check the "DMA hot chaining timeslice issue" in
- *   Documentation/video4linux/pxa_camera.txt
+ *   Documentation/media/v4l-drivers/pxa_camera.rst
  *
  * Context: should only be called within the dma irq handler
  */
@@ -1443,7 +1443,7 @@ static void pxac_vb2_queue(struct vb2_buffer *vb)
 
 /*
  * Please check the DMA prepared buffer structure in :
- *   Documentation/video4linux/pxa_camera.txt
+ *   Documentation/media/v4l-drivers/pxa_camera.rst
  * Please check also in pxa_camera_check_link_miss() to understand why DMA chain
  * modification while DMA chain is running will work anyway.
  */
index 242342f..9897213 100644 (file)
@@ -1111,7 +1111,7 @@ static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
 /*
  * CEU can scale and crop, but we don't want to waste bandwidth and kill the
  * framerate by always requesting the maximum image from the client. See
- * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
+ * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of
  * scaling and cropping algorithms and for the meaning of referenced here steps.
  */
 static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
index f01c3e8..c8bb82f 100644 (file)
 #include <linux/via-core.h>
 #include <linux/via-gpio.h>
 #include <linux/via_i2c.h>
+
+#ifdef CONFIG_X86
 #include <asm/olpc.h>
+#else
+#define machine_is_olpc(x) 0
+#endif
 
 #include "via-camera.h"
 
index 39b04ad..9b99dfb 100644 (file)
@@ -35,7 +35,7 @@ config RADIO_SI476X
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux 2 API.  Information on
          this API and pointers to "v4l2" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-si476x.
@@ -75,7 +75,7 @@ config RADIO_MAXIRADIO
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux API.  Information on
          this API and pointers to "v4l" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-maxiradio.
@@ -93,7 +93,7 @@ config RADIO_SHARK
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux API.  Information on
          this API and pointers to "v4l" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-shark.
@@ -110,7 +110,7 @@ config RADIO_SHARK2
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux API.  Information on
          this API and pointers to "v4l" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-shark2.
@@ -217,7 +217,7 @@ config RADIO_WL1273
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux 2 API.  Information on
          this API and pointers to "v4l2" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-wl1273.
@@ -272,7 +272,7 @@ config RADIO_RTRACK
          been reported to be used by these cards.
 
          More information is contained in the file
-         <file:Documentation/video4linux/radiotrack.txt>.
+         <file:Documentation/media/v4l-drivers/radiotrack.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called radio-aimslab.
index a21172e..6dbb158 100644 (file)
@@ -29,7 +29,7 @@ config USB_SI470X
 
          Please have a look at the documentation, especially on how
          to redirect the audio stream from the radio to your sound device:
-         Documentation/video4linux/si470x.txt
+         Documentation/media/v4l-drivers/si470x.rst
 
          Say Y here if you want to connect this type of radio to your
          computer's USB port.
index 2add222..64b66bb 100644 (file)
@@ -12,6 +12,6 @@ config RADIO_WL128X
          In order to control your radio card, you will need to use programs
          that are compatible with the Video For Linux 2 API.  Information on
          this API and pointers to "v4l2" programs may be found at
-         <file:Documentation/video4linux/API.html>.
+         <file:Documentation/media/media_uapi.rst>.
 
 endmenu
index 3705347..082b8d6 100644 (file)
@@ -6,7 +6,7 @@ config DVB_USB_V2
          USB1.1 and USB2.0 DVB devices.
 
          Almost every USB device needs a firmware, please look into
-         <file:Documentation/dvb/README.dvb-usb>.
+         <file:Documentation/media/dvb-drivers/dvb-usb.rst>.
 
          For a complete list of supported USB devices see the LinuxTV DVB Wiki:
          <https://linuxtv.org/wiki/index.php/DVB_USB>
index afdcdbf..955318a 100644 (file)
@@ -47,7 +47,7 @@ static int dvb_usbv2_download_firmware(struct dvb_usb_device *d,
        ret = request_firmware(&fw, name, &d->udev->dev);
        if (ret < 0) {
                dev_err(&d->udev->dev,
-                               "%s: Did not find the firmware file '%s'. Please see linux/Documentation/dvb/ for more details on firmware-problems. Status %d\n",
+                               "%s: Did not find the firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware\n",
                                KBUILD_MODNAME, name, ret);
                goto err;
        }
index 4817dfd..9d154fd 100644 (file)
@@ -4,7 +4,7 @@
  *     under the terms of the GNU General Public License as published by the
  *     Free Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "gl861.h"
 
index be26c02..0750a97 100644 (file)
@@ -21,7 +21,7 @@
  *
  * LME2510C + M88RS2000
  *
- * For firmware see Documentation/dvb/lmedm04.txt
+ * For firmware see Documentation/media/dvb-drivers/lmedm04.rst
  *
  * I2C addresses:
  * 0xd0 - STV0288      - Demodulator
@@ -49,7 +49,7 @@
  * GNU General Public License for more details.
  *
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  *
  * Known Issues :
  *     LME2510: Non Intel USB chipsets fail to maintain High Speed on
index e9c2072..c4ae37c 100644 (file)
@@ -16,7 +16,7 @@
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation,  version 2.
  * *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_LME2510_H_
 #define _DVB_USB_LME2510_H_
index 6795336..4713ba6 100644 (file)
@@ -5,7 +5,7 @@
  *   under the terms of the GNU General Public License as published by the Free
  *   Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 
 #include <linux/vmalloc.h>
index 3e6f588..22253d4 100644 (file)
@@ -5,7 +5,7 @@
  *   under the terms of the GNU General Public License as published by the Free
  *   Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 
 #ifndef _DVB_USB_MXL111SF_H_
index 2651ae2..b8a1c62 100644 (file)
@@ -6,7 +6,7 @@ config DVB_USB
          USB1.1 and USB2.0 DVB devices.
 
          Almost every USB device needs a firmware, please look into
-         <file:Documentation/dvb/README.dvb-usb>.
+         <file:Documentation/media/dvb-drivers/dvb-usb.rst>.
 
          For a complete list of supported USB devices see the LinuxTV DVB Wiki:
          <https://linuxtv.org/wiki/index.php/DVB_USB>
index 540886b..198bd5e 100644 (file)
@@ -11,7 +11,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dibusb.h"
 
index 544bdf1..7fbbc95 100644 (file)
@@ -15,7 +15,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "af9005.h"
 #include "af9005-script.h"
index 9b29ffa..f7cdcc8 100644 (file)
@@ -17,7 +17,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "af9005.h"
 /* debug */
index 986763b..16e946e 100644 (file)
@@ -15,7 +15,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "af9005.h"
 
index a1eae0f..7ae4dc3 100644 (file)
@@ -15,7 +15,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_AF9005_H_
 #define _DVB_USB_AF9005_H_
index f0d10ac..6321b8e 100644 (file)
@@ -7,7 +7,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "az6027.h"
 
index b70d289..5b51ed7 100644 (file)
@@ -21,7 +21,7 @@
  *   under the terms of the GNU General Public License as published by the Free
  *   Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include <media/tuner.h>
 #include <linux/vmalloc.h>
index bcacb0f..fb1b4f2 100644 (file)
@@ -6,7 +6,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 
 #include "dibusb.h"
index a005764..4089205 100644 (file)
@@ -10,7 +10,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dibusb.h"
 
index 0c2bc97..ec3a20a 100644 (file)
@@ -6,7 +6,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 
 #include "dibusb.h"
index 08fb8a3..bce8ffe 100644 (file)
@@ -10,7 +10,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dibusb.h"
 
index 697be2a..943df57 100644 (file)
@@ -6,7 +6,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_DIBUSB_H_
 #define _DVB_USB_DIBUSB_H_
index 475a3c0..49b9d63 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "digitv.h"
 
index 00f565f..7e75aae 100644 (file)
@@ -7,7 +7,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dtt200u.h"
 
index 5123707..f03d269 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dtt200u.h"
 
index efccc39..ea2a096 100644 (file)
@@ -7,7 +7,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_DTT200U_H_
 #define _DVB_USB_DTT200U_H_
index 15c153e..42c207a 100644 (file)
@@ -90,7 +90,7 @@ int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_pro
        const struct firmware *fw = NULL;
 
        if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) {
-               err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+               err("did not find the firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware",
                        props->firmware,ret);
                return ret;
        }
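
A minimal sketch of the firmware-load sequence these reworded messages live in, assuming a generic struct device pointer (example_load_fw and its arguments are illustrative, not part of the patch):

	#include <linux/firmware.h>
	#include <linux/device.h>

	static int example_load_fw(struct device *dev, const char *name)
	{
		const struct firmware *fw = NULL;
		int ret;

		ret = request_firmware(&fw, name, dev);
		if (ret) {
			/* point the user at the in-tree fetch script */
			dev_err(dev, "did not find the firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware\n",
				name, ret);
			return ret;
		}

		/* ... push fw->data (fw->size bytes) to the device ... */

		release_firmware(fw);
		return 0;
	}
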
index 8430856..40ca4ea 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dvb-usb-common.h"
 
index 346946f..0d4fdd3 100644 (file)
@@ -11,7 +11,7 @@
  *     under the terms of the GNU General Public License as published by the
  *     Free Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include <media/dvb-usb-ids.h>
 #include "dw2102.h"
@@ -61,9 +61,7 @@
 #define P1100_FIRMWARE  "dvb-usb-p1100.fw"
 #define P7500_FIRMWARE  "dvb-usb-p7500.fw"
 
-#define        err_str "did not find the firmware file. (%s) " \
-               "Please see linux/Documentation/dvb/ for more details " \
-               "on firmware-problems."
+#define        err_str "did not find the firmware file '%s'. You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware"
 
 struct dw2102_state {
        u8 initialized;
index b2830c1..932f262 100644 (file)
@@ -8,7 +8,7 @@
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include <linux/init.h>
 #include <linux/string.h>
index 1687594..fe799a7 100644 (file)
@@ -8,7 +8,7 @@
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "friio.h"
 
index 0f461ca..a53af56 100644 (file)
@@ -8,7 +8,7 @@
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_FRIIO_H_
 #define _DVB_USB_FRIIO_H_
index 334b9fb..13e96b0 100644 (file)
@@ -12,7 +12,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "gp8psk.h"
 #include "gp8psk-fe.h"
@@ -135,7 +135,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
        u8 *buf;
        if ((ret = request_firmware(&fw, bcm4500_firmware,
                                        &d->udev->dev)) != 0) {
-               err("did not find the bcm4500 firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+               err("did not find the bcm4500 firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware",
                        bcm4500_firmware,ret);
                return ret;
        }
index d8975b8..fd063e3 100644 (file)
@@ -12,7 +12,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_GP8PSK_H_
 #define _DVB_USB_GP8PSK_H_
index 32081c2..51b026f 100644 (file)
@@ -6,7 +6,7 @@
  *     under the terms of the GNU General Public License as published by the
  *     Free Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 
 #include "m920x.h"
index 1babd33..43e0e0f 100644 (file)
@@ -7,7 +7,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dibusb.h"
 
index 946a5cc..61a377e 100644 (file)
@@ -7,7 +7,7 @@
 *      under the terms of the GNU General Public License as published by the Free
 *      Software Foundation, version 2.
 *
-* see Documentation/dvb/README.dvb-usb for more information
+* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
 */
 
 #define DVB_USB_LOG_PREFIX "opera"
@@ -453,7 +453,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
        info("start downloading fpga firmware %s",filename);
 
        if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
-               err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+               err("did not find the firmware file '%s'. You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware",
                        filename);
                return ret;
        } else {
index 12de896..b4d6811 100644 (file)
@@ -20,7 +20,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #define DVB_USB_LOG_PREFIX "ttusb2"
 #include "dvb-usb.h"
index 52a63af..8b6525e 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_TTUSB2_H_
 #define _DVB_USB_TTUSB2_H_
index 58ad5b4..920bc67 100644 (file)
@@ -7,7 +7,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "dibusb.h"
 
index 7ff31ba..ae48146 100644 (file)
@@ -15,7 +15,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  *
  */
 #include "vp702x.h"
index 40de33d..c3529ea 100644 (file)
@@ -12,7 +12,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "vp702x.h"
 #include <linux/mutex.h>
index 4520ad9..f860401 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  *
  */
 #include "vp7045.h"
index 2527b88..e2c8a85 100644 (file)
@@ -10,7 +10,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #include "vp7045.h"
 
index 6649993..2fdafd8 100644 (file)
@@ -9,7 +9,7 @@
  *     under the terms of the GNU General Public License as published by the Free
  *     Software Foundation, version 2.
  *
- * see Documentation/dvb/README.dvb-usb for more information
+ * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
  */
 #ifndef _DVB_USB_VP7045_H_
 #define _DVB_USB_VP7045_H_
index 5a69016..13a0039 100644 (file)
@@ -5,7 +5,5 @@ config USB_M5602
          Say Y here if you want support for cameras based on the
          ALi m5602 connected to various image sensors.
 
-         See <file:Documentation/video4linux/m5602.txt> for more info.
-
          To compile this driver as a module, choose M here: the
          module will be called gspca_m5602.
index 290254a..b205903 100644 (file)
@@ -12,9 +12,9 @@ config DVB_TTUSB_DEC
          an external software decoder to watch TV on your computer.
 
          This driver needs external firmware. Please use the commands
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2000t",
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2540t",
-         "<kerneldir>/Documentation/dvb/get_dvb_firmware dec3000s",
+         "<kerneldir>/scripts/get_dvb_firmware dec2000t",
+         "<kerneldir>/scripts/get_dvb_firmware dec2540t",
+         "<kerneldir>/scripts/get_dvb_firmware dec3000s",
          download/extract them, and then copy them to /usr/lib/hotplug/firmware
          or /lib/firmware (depending on configuration of firmware hotplug).
 
index 0f58566..ac429bc 100644 (file)
@@ -6,7 +6,7 @@ config USB_ZR364XX
        ---help---
          Say Y here if you want to connect this type of camera to your
          computer's USB port.
-         See <file:Documentation/video4linux/zr364xx.txt> for more info
+         See <file:Documentation/media/v4l-drivers/zr364xx.rst> for more info
          and list of supported cameras.
 
          To compile this driver as a module, choose M here: the
index 14d287b..1ab613e 100644 (file)
@@ -33,7 +33,7 @@ config E100
          to identify the adapter.
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/e100.txt>.
+         <file:Documentation/networking/e100.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called e100.
@@ -49,7 +49,7 @@ config E1000
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         <file:Documentation/networking/e1000.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called e1000.
@@ -94,7 +94,7 @@ config IGB
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         <file:Documentation/networking/e1000.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called igb.
@@ -130,7 +130,7 @@ config IGBVF
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         <file:Documentation/networking/e1000.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called igbvf.
index effb130..21710a7 100644 (file)
@@ -2208,7 +2208,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                 * Verify that the subsystem actually supports multiple
                 * controllers, else bail out.
                 */
-               if (!ctrl->opts->discovery_nqn &&
+               if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
                    nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
                        dev_err(ctrl->device,
                                "ignoring ctrl due to duplicate subnqn (%s).\n",
@@ -3197,40 +3197,28 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
        nvme_remove_invalid_namespaces(ctrl, nn);
 }
 
-static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
 {
        size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
        __le32 *log;
-       int error, i;
-       bool ret = false;
+       int error;
 
        log = kzalloc(log_size, GFP_KERNEL);
        if (!log)
-               return false;
+               return;
 
+       /*
+        * We need to read the log to clear the AEN, but we don't want to rely
+        * on it for the changed namespace information as userspace could have
+        * raced with us in reading the log page, which could cause us to miss
+        * updates.
+        */
        error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
-       if (error) {
+       if (error)
                dev_warn(ctrl->device,
                        "reading changed ns log failed: %d\n", error);
-               goto out_free_log;
-       }
-
-       if (log[0] == cpu_to_le32(0xffffffff))
-               goto out_free_log;
-
-       for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
-               u32 nsid = le32_to_cpu(log[i]);
 
-               if (nsid == 0)
-                       break;
-               dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
-               nvme_validate_ns(ctrl, nsid);
-       }
-       ret = true;
-
-out_free_log:
        kfree(log);
-       return ret;
 }
 
 static void nvme_scan_work(struct work_struct *work)
@@ -3246,9 +3234,8 @@ static void nvme_scan_work(struct work_struct *work)
        WARN_ON_ONCE(!ctrl->tagset);
 
        if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
-               if (nvme_scan_changed_ns_log(ctrl))
-                       goto out_sort_namespaces;
                dev_info(ctrl->device, "rescanning namespaces.\n");
+               nvme_clear_changed_ns_log(ctrl);
        }
 
        if (nvme_identify_ctrl(ctrl, &id))
@@ -3263,7 +3250,6 @@ static void nvme_scan_work(struct work_struct *work)
        nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
        kfree(id);
-out_sort_namespaces:
        down_write(&ctrl->namespaces_rwsem);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        up_write(&ctrl->namespaces_rwsem);
@@ -3641,16 +3627,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
-int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
-{
-       if (!ctrl->ops->reinit_request)
-               return 0;
-
-       return blk_mq_tagset_iter(set, set->driver_data,
-                       ctrl->ops->reinit_request);
-}
-EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
-
 int __init nvme_core_init(void)
 {
        int result = -ENOMEM;
index fa32c12..903eb45 100644 (file)
@@ -536,67 +536,55 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
        return NULL;
 }
 
-blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
-               bool queue_live, bool is_connected)
+/*
+ * For a command we're not in a state to send to the device, the default action
+ * is to busy it and retry it after the controller state is recovered.  However,
+ * anything marked for failfast or nvme multipath is immediately failed.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvmf_fail_nonready_command(struct request *rq)
 {
-       struct nvme_command *cmd = nvme_req(rq)->cmd;
+       if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+               return BLK_STS_RESOURCE;
+       nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+       return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
 
-       if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
-               return BLK_STS_OK;
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+               bool queue_live)
+{
+       struct nvme_request *req = nvme_req(rq);
+
+       /*
+        * If we are in some state of setup or teardown, only allow
+        * internally generated commands.
+        */
+       if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+               return false;
 
+       /*
+        * Only allow commands on a live queue, except for the connect command,
+        * which is required to set the queue live in the appropriate states.
+        */
        switch (ctrl->state) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_CONNECTING:
-       case NVME_CTRL_DELETING:
-               /*
-                * This is the case of starting a new or deleting an association
-                * but connectivity was lost before it was fully created or torn
-                * down. We need to error the commands used to initialize the
-                * controller so the reconnect can go into a retry attempt.  The
-                * commands should all be marked REQ_FAILFAST_DRIVER, which will
-                * hit the reject path below. Anything else will be queued while
-                * the state settles.
-                */
-               if (!is_connected)
-                       break;
-
-               /*
-                * If queue is live, allow only commands that are internally
-                * generated pass through.  These are commands on the admin
-                * queue to initialize the controller. This will reject any
-                * ioctl admin cmds received while initializing.
-                */
-               if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
-                       return BLK_STS_OK;
-
-               /*
-                * If the queue is not live, allow only a connect command.  This
-                * will reject any ioctl admin cmd as well as initialization
-                * commands if the controller reverted the queue to non-live.
-                */
-               if (!queue_live && blk_rq_is_passthrough(rq) &&
-                    cmd->common.opcode == nvme_fabrics_command &&
-                    cmd->fabrics.fctype == nvme_fabrics_type_connect)
-                       return BLK_STS_OK;
+               if (req->cmd->common.opcode == nvme_fabrics_command &&
+                   req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+                       return true;
                break;
        default:
                break;
+       case NVME_CTRL_DEAD:
+               return false;
        }
 
-       /*
-        * Any other new io is something we're not in a state to send to the
-        * device.  Default action is to busy it and retry it after the
-        * controller state is recovered. However, anything marked for failfast
-        * or nvme multipath is immediately failed.  Note: commands used to
-        * initialize the controller will be marked for failfast.
-        * Note: nvme cli/ioctl commands are marked for failfast.
-        */
-       if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
-               return BLK_STS_RESOURCE;
-       nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-       return BLK_STS_IOERR;
+       return queue_live;
 }
-EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
+EXPORT_SYMBOL_GPL(__nvmf_check_ready);
 
 static const match_table_t opt_tokens = {
        { NVMF_OPT_TRANSPORT,           "transport=%s"          },
index 7491a0b..e1818a2 100644 (file)
@@ -162,7 +162,17 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
-       struct request *rq, bool queue_live, bool is_connected);
+blk_status_t nvmf_fail_nonready_command(struct request *rq);
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+               bool queue_live);
+
+static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+               bool queue_live)
+{
+       if (likely(ctrl->state == NVME_CTRL_LIVE ||
+                  ctrl->state == NVME_CTRL_ADMIN_ONLY))
+               return true;
+       return __nvmf_check_ready(ctrl, rq, queue_live);
+}
 
 #endif /* _NVME_FABRICS_H */
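
Taken together, these fabrics hunks split the old nvmf_check_if_ready() into a fast-path inline check, a slow-path helper, and a separate failure routine. A sketch of how a transport's ->queue_rq() consumes the pair, mirroring the loop and RDMA hunks further down (struct example_queue and EXAMPLE_Q_LIVE stand in for the transport's own queue type and live flag):

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct example_queue *queue = hctx->driver_data;
		struct request *rq = bd->rq;
		bool queue_ready = test_bit(EXAMPLE_Q_LIVE, &queue->flags);

		/*
		 * nvmf_check_ready() short-circuits for LIVE/ADMIN_ONLY
		 * controllers; otherwise __nvmf_check_ready() only admits
		 * the fabrics connect command on a queue that is not yet
		 * live.
		 */
		if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
			return nvmf_fail_nonready_command(rq);

		/* ... normal command setup and submission ... */
		return BLK_STS_OK;
	}
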
index 0bad658..b528a2f 100644 (file)
@@ -142,6 +142,7 @@ struct nvme_fc_ctrl {
        struct nvme_fc_rport    *rport;
        u32                     cnum;
 
+       bool                    ioq_live;
        bool                    assoc_active;
        u64                     association_id;
 
@@ -1470,21 +1471,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
-static int
-nvme_fc_reinit_request(void *data, struct request *rq)
-{
-       struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
-
-       memset(cmdiu, 0, sizeof(*cmdiu));
-       cmdiu->scsi_id = NVME_CMD_SCSI_ID;
-       cmdiu->fc_id = NVME_CMD_FC_ID;
-       cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
-       memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
-
-       return 0;
-}
-
 static void
 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_fcp_op *op)
@@ -1893,6 +1879,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
+       atomic_set(&queue->csn, 1);
 }
 
 static void
@@ -2279,14 +2266,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        struct nvme_command *sqe = &cmdiu->sqe;
        enum nvmefc_fcp_datadir io_dir;
+       bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
        u32 data_len;
        blk_status_t ret;
 
-       ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
-               test_bit(NVME_FC_Q_LIVE, &queue->flags),
-               ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
-       if (unlikely(ret))
-               return ret;
+       if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+           !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+               return nvmf_fail_nonready_command(rq);
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
@@ -2463,6 +2449,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_delete_hw_queues;
 
+       ctrl->ioq_live = true;
+
        return 0;
 
 out_delete_hw_queues:
@@ -2480,7 +2468,7 @@ out_free_tag_set:
 }
 
 static int
-nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
 {
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
@@ -2500,12 +2488,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.queue_count == 1)
                return 0;
 
-       nvme_fc_init_io_queues(ctrl);
-
-       ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-       if (ret)
-               goto out_free_io_queues;
-
        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;
@@ -2603,8 +2585,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
         * Create the admin queue
         */
 
-       nvme_fc_init_queue(ctrl, 0);
-
        ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
                                NVME_AQ_DEPTH);
        if (ret)
@@ -2615,8 +2595,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_delete_hw_queue;
 
-       if (ctrl->ctrl.state != NVME_CTRL_NEW)
-               blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
@@ -2689,10 +2668,10 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
         */
 
        if (ctrl->ctrl.queue_count > 1) {
-               if (ctrl->ctrl.state == NVME_CTRL_NEW)
+               if (!ctrl->ioq_live)
                        ret = nvme_fc_create_io_queues(ctrl);
                else
-                       ret = nvme_fc_reinit_io_queues(ctrl);
+                       ret = nvme_fc_recreate_io_queues(ctrl);
                if (ret)
                        goto out_term_aen_ops;
        }
@@ -2776,8 +2755,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
         * use blk_mq_tagset_busy_itr() and the transport routine to
         * terminate the exchanges.
         */
-       if (ctrl->ctrl.state != NVME_CTRL_NEW)
-               blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
 
@@ -2917,7 +2895,6 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .submit_async_event     = nvme_fc_submit_async_event,
        .delete_ctrl            = nvme_fc_delete_ctrl,
        .get_address            = nvmf_get_address,
-       .reinit_request         = nvme_fc_reinit_request,
 };
 
 static void
@@ -2934,7 +2911,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
                nvme_fc_reconnect_or_delete(ctrl, ret);
        else
                dev_info(ctrl->ctrl.device,
-                       "NVME-FC{%d}: controller reconnect complete\n",
+                       "NVME-FC{%d}: controller connect complete\n",
                        ctrl->cnum);
 }
 
@@ -2982,7 +2959,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 {
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;
-       int ret, idx, retry;
+       int ret, idx;
 
        if (!(rport->remoteport.port_role &
            (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3009,11 +2986,13 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        }
 
        ctrl->ctrl.opts = opts;
+       ctrl->ctrl.nr_reconnects = 0;
        INIT_LIST_HEAD(&ctrl->ctrl_list);
        ctrl->lport = lport;
        ctrl->rport = rport;
        ctrl->dev = lport->dev;
        ctrl->cnum = idx;
+       ctrl->ioq_live = false;
        ctrl->assoc_active = false;
        init_waitqueue_head(&ctrl->ioabort_wait);
 
@@ -3032,6 +3011,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
+       ctrl->ctrl.cntlid = 0xffff;
 
        ret = -ENOMEM;
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
@@ -3039,6 +3019,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        if (!ctrl->queues)
                goto out_free_ida;
 
+       nvme_fc_init_queue(ctrl, 0);
+
        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
@@ -3081,62 +3063,24 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
        spin_unlock_irqrestore(&rport->lock, flags);
 
-       /*
-        * It's possible that transactions used to create the association
-        * may fail. Examples: CreateAssociation LS or CreateIOConnection
-        * LS gets dropped/corrupted/fails; or a frame gets dropped or a
-        * command times out for one of the actions to init the controller
-        * (Connect, Get/Set_Property, Set_Features, etc). Many of these
-        * transport errors (frame drop, LS failure) inherently must kill
-        * the association. The transport is coded so that any command used
-        * to create the association (prior to a LIVE state transition
-        * while NEW or CONNECTING) will fail if it completes in error or
-        * times out.
-        *
-        * As such: as the connect request was mostly likely due to a
-        * udev event that discovered the remote port, meaning there is
-        * not an admin or script there to restart if the connect
-        * request fails, retry the initial connection creation up to
-        * three times before giving up and declaring failure.
-        */
-       for (retry = 0; retry < 3; retry++) {
-               ret = nvme_fc_create_association(ctrl);
-               if (!ret)
-                       break;
-       }
-
-       if (ret) {
-               nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
-               cancel_work_sync(&ctrl->ctrl.reset_work);
-               cancel_delayed_work_sync(&ctrl->connect_work);
-
-               /* couldn't schedule retry - fail out */
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
+           !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                dev_err(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
-
-               ctrl->ctrl.opts = NULL;
+                       "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
+               goto fail_ctrl;
+       }
 
-               /* initiate nvme ctrl ref counting teardown */
-               nvme_uninit_ctrl(&ctrl->ctrl);
+       nvme_get_ctrl(&ctrl->ctrl);
 
-               /* Remove core ctrl ref. */
+       if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
                nvme_put_ctrl(&ctrl->ctrl);
-
-               /* as we're past the point where we transition to the ref
-                * counting teardown path, if we return a bad pointer here,
-                * the calling routine, thinking it's prior to the
-                * transition, will do an rport put. Since the teardown
-                * path also does a rport put, we do an extra get here to
-                * so proper order/teardown happens.
-                */
-               nvme_fc_rport_get(rport);
-
-               if (ret > 0)
-                       ret = -EIO;
-               return ERR_PTR(ret);
+               dev_err(ctrl->ctrl.device,
+                       "NVME-FC{%d}: failed to schedule initial connect\n",
+                       ctrl->cnum);
+               goto fail_ctrl;
        }
 
-       nvme_get_ctrl(&ctrl->ctrl);
+       flush_delayed_work(&ctrl->connect_work);
 
        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3144,6 +3088,30 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        return &ctrl->ctrl;
 
+fail_ctrl:
+       nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+       cancel_work_sync(&ctrl->ctrl.reset_work);
+       cancel_delayed_work_sync(&ctrl->connect_work);
+
+       ctrl->ctrl.opts = NULL;
+
+       /* initiate nvme ctrl ref counting teardown */
+       nvme_uninit_ctrl(&ctrl->ctrl);
+
+       /* Remove core ctrl ref. */
+       nvme_put_ctrl(&ctrl->ctrl);
+
+       /* as we're past the point where we transition to the ref
+        * counting teardown path, if we return a bad pointer here,
+        * the calling routine, thinking it's prior to the
+        * transition, will do an rport put. Since the teardown
+        * path also does a rport put, we do an extra get here so
+        * proper order/teardown happens.
+        */
+       nvme_fc_rport_get(rport);
+
+       return ERR_PTR(-EIO);
+
 out_cleanup_admin_q:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_free_admin_tag_set:
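
Net effect of the nvme_fc_init_ctrl() rework: the synchronous three-attempt connect loop is gone, and the first association attempt now runs asynchronously through connect_work, with any retries driven by the regular reconnect machinery (nvme_fc_reconnect_or_delete). A condensed sketch of the new flow, error handling reduced to the fail_ctrl jumps shown above:

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		goto fail_ctrl;

	nvme_get_ctrl(&ctrl->ctrl);

	/* the first attempt is queued like any reconnect would be ... */
	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		goto fail_ctrl;
	}
	/* ... and we only wait for that first attempt to finish */
	flush_delayed_work(&ctrl->connect_work);
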
index d7b664a..1ffd3e8 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/moduleparam.h>
+#include <trace/events/block.h>
 #include "nvme.h"
 
 static bool multipath = true;
@@ -111,6 +112,9 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
        if (likely(ns)) {
                bio->bi_disk = ns->disk;
                bio->bi_opf |= REQ_NVME_MPATH;
+               trace_block_bio_remap(bio->bi_disk->queue, bio,
+                                     disk_devt(ns->head->disk),
+                                     bio->bi_iter.bi_sector);
                ret = direct_make_request(bio);
        } else if (!list_empty_careful(&head->list)) {
                dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
index 34df07d..231807c 100644 (file)
@@ -321,7 +321,6 @@ struct nvme_ctrl_ops {
        void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
-       int (*reinit_request)(void *data, struct request *rq);
        void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 };
 
@@ -416,7 +415,6 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl);
 void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
-int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);
 
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
index 2aba038..c9424da 100644 (file)
@@ -1189,21 +1189,38 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
                    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (unlikely(count <= 0)) {
-               sg_free_table_chained(&req->sg_table, true);
-               return -EIO;
+               ret = -EIO;
+               goto out_free_table;
        }
 
        if (count == 1) {
                if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
                    blk_rq_payload_bytes(rq) <=
-                               nvme_rdma_inline_data_size(queue))
-                       return nvme_rdma_map_sg_inline(queue, req, c);
+                               nvme_rdma_inline_data_size(queue)) {
+                       ret = nvme_rdma_map_sg_inline(queue, req, c);
+                       goto out;
+               }
 
-               if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
-                       return nvme_rdma_map_sg_single(queue, req, c);
+               if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+                       ret = nvme_rdma_map_sg_single(queue, req, c);
+                       goto out;
+               }
        }
 
-       return nvme_rdma_map_sg_fr(queue, req, c, count);
+       ret = nvme_rdma_map_sg_fr(queue, req, c, count);
+out:
+       if (unlikely(ret))
+               goto out_unmap_sg;
+
+       return 0;
+
+out_unmap_sg:
+       ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+                       req->nents, rq_data_dir(rq) ==
+                       WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+out_free_table:
+       sg_free_table_chained(&req->sg_table, true);
+       return ret;
 }
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1613,15 +1630,14 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_rdma_qe *sqe = &req->sqe;
        struct nvme_command *c = sqe->data;
        struct ib_device *dev;
+       bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
        blk_status_t ret;
        int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
-               test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
-       if (unlikely(ret))
-               return ret;
+       if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+               return nvmf_fail_nonready_command(rq);
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
index 9625328..3880357 100644 (file)
@@ -119,9 +119,11 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
-               goto out;
+               goto out_free_log;
 
        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+out_free_log:
+       kfree(log);
 out:
        nvmet_req_complete(req, status);
 }
index 1304ec3..d8d91f0 100644 (file)
@@ -158,12 +158,11 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+       bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
        blk_status_t ret;
 
-       ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
-               test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
-       if (unlikely(ret))
-               return ret;
+       if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+               return nvmf_fail_nonready_command(req);
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
index 44333bd..a97f4ea 100644 (file)
@@ -20,7 +20,7 @@ menuconfig PARPORT
          drive, PLIP link (Parallel Line Internet Protocol is mainly used to
          create a mini network by connecting the parallel ports of two local
          machines) etc., then you need to say Y here; please read
-         <file:Documentation/parport.txt> and
+         <file:Documentation/admin-guide/parport.rst> and
          <file:drivers/parport/BUGS-parport>.
 
          For extensive information about drivers for many devices attaching
@@ -33,7 +33,7 @@ menuconfig PARPORT
          the module will be called parport.
          If you have more than one parallel port and want to specify which
          port and IRQ to be used by this driver at module load time, take a
-         look at <file:Documentation/parport.txt>.
+         look at <file:Documentation/admin-guide/parport.rst>.
 
          If unsure, say Y.
 
@@ -71,7 +71,7 @@ config PARPORT_PC_FIFO
          As well as actually having a FIFO, or DMA capability, the kernel
          will need to know which IRQ the parallel port has.  By default,
          parallel port interrupts will not be used, and so neither will the
-         FIFO.  See <file:Documentation/parport.txt> to find out how to
+         FIFO.  See <file:Documentation/admin-guide/parport.rst> to find out how to
          specify which IRQ/DMA to use.
 
 config PARPORT_PC_SUPERIO
index f27cb18..ac4d488 100644 (file)
@@ -1052,7 +1052,7 @@ config SAMSUNG_LAPTOP
          function keys, wireless LED, LCD backlight level.
 
          It may also provide some sysfs files described in
-         <file:Documentation/ABI/testing/sysfs-platform-samsung-laptop>
+         <file:Documentation/ABI/testing/sysfs-driver-samsung-laptop>
 
          To compile this driver as a module, choose M here: the module
          will be called samsung-laptop.
index 73cce3e..d3a38c4 100644 (file)
@@ -1222,80 +1222,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
                device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
-{
-       struct dasd_ccw_req *cqr;
-
-       /* Sanity checks */
-       BUG_ON(datasize > PAGE_SIZE ||
-            (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
-       cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-       if (cqr == NULL)
-               return ERR_PTR(-ENOMEM);
-       cqr->cpaddr = NULL;
-       if (cplength > 0) {
-               cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-                                     GFP_ATOMIC | GFP_DMA);
-               if (cqr->cpaddr == NULL) {
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->data = NULL;
-       if (datasize > 0) {
-               cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-               if (cqr->data == NULL) {
-                       kfree(cqr->cpaddr);
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->magic =  magic;
-       set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-       dasd_get_device(device);
-       return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+                                         struct dasd_device *device,
+                                         struct dasd_ccw_req *cqr)
 {
        unsigned long flags;
-       struct dasd_ccw_req *cqr;
-       char *data;
-       int size;
+       char *data, *chunk;
+       int size = 0;
 
-       size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
+       if (!cqr)
+               size += (sizeof(*cqr) + 7L) & -8L;
+
        spin_lock_irqsave(&device->mem_lock, flags);
-       cqr = (struct dasd_ccw_req *)
-               dasd_alloc_chunk(&device->ccw_chunks, size);
+       data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
-       if (cqr == NULL)
+       if (!chunk)
                return ERR_PTR(-ENOMEM);
-       memset(cqr, 0, sizeof(struct dasd_ccw_req));
-       data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-       cqr->cpaddr = NULL;
+       if (!cqr) {
+               cqr = (void *) data;
+               data += (sizeof(*cqr) + 7L) & -8L;
+       }
+       memset(cqr, 0, sizeof(*cqr));
+       cqr->mem_chunk = chunk;
        if (cplength > 0) {
-               cqr->cpaddr = (struct ccw1 *) data;
-               data += cplength*sizeof(struct ccw1);
-               memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+               cqr->cpaddr = data;
+               data += cplength * sizeof(struct ccw1);
+               memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
-       cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
@@ -1307,33 +1264,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-       struct ccw1 *ccw;
-
-       /* Clear any idals used for the request. */
-       ccw = cqr->cpaddr;
-       do {
-               clear_normalized_cda(ccw);
-       } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-       kfree(cqr->cpaddr);
-       kfree(cqr->data);
-       kfree(cqr);
-       dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&device->mem_lock, flags);
-       dasd_free_chunk(&device->ccw_chunks, cqr);
+       dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
 }
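
With dasd_kmalloc_request()/dasd_kfree_request() gone, dasd_smalloc_request() gains a fifth parameter: a caller-provided struct dasd_ccw_req (typically the blk-mq request pdu) or NULL to carve the cqr itself out of the per-device chunk. A sketch of the two call styles used by the conversions below:

	/* 1) internal/static request: let the allocator embed the cqr in
	 *    the chunk it hands back (last argument NULL) */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   device, NULL);

	/* 2) request driven by a blk-mq request: reuse the pdu that is
	 *    sized via tag_set.cmd_size = sizeof(struct dasd_ccw_req) */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   device, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* either way, freeing goes through the saved cqr->mem_chunk */
	dasd_sfree_request(cqr, cqr->memdev);
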
@@ -1885,6 +1821,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
        }
 }
 
+static void __dasd_process_cqr(struct dasd_device *device,
+                              struct dasd_ccw_req *cqr)
+{
+       char errorstring[ERRORLENGTH];
+
+       switch (cqr->status) {
+       case DASD_CQR_SUCCESS:
+               cqr->status = DASD_CQR_DONE;
+               break;
+       case DASD_CQR_ERROR:
+               cqr->status = DASD_CQR_NEED_ERP;
+               break;
+       case DASD_CQR_CLEARED:
+               cqr->status = DASD_CQR_TERMINATED;
+               break;
+       default:
+               /* internal error 12 - wrong cqr status*/
+               snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
+               dev_err(&device->cdev->dev,
+                       "An error occurred in the DASD device driver, "
+                       "reason=%s\n", errorstring);
+               BUG();
+       }
+       if (cqr->callback)
+               cqr->callback(cqr, cqr->callback_data);
+}
+
 /*
  * the cqrs from the final queue are returned to the upper layer
  * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1858,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
-       void (*callback)(struct dasd_ccw_req *, void *data);
-       void *callback_data;
-       char errorstring[ERRORLENGTH];
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
-               callback = cqr->callback;
-               callback_data = cqr->callback_data;
-               if (block)
+               if (!block) {
+                       __dasd_process_cqr(device, cqr);
+               } else {
                        spin_lock_bh(&block->queue_lock);
-               switch (cqr->status) {
-               case DASD_CQR_SUCCESS:
-                       cqr->status = DASD_CQR_DONE;
-                       break;
-               case DASD_CQR_ERROR:
-                       cqr->status = DASD_CQR_NEED_ERP;
-                       break;
-               case DASD_CQR_CLEARED:
-                       cqr->status = DASD_CQR_TERMINATED;
-                       break;
-               default:
-                       /* internal error 12 - wrong cqr status*/
-                       snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
-                       dev_err(&device->cdev->dev,
-                               "An error occurred in the DASD device driver, "
-                               "reason=%s\n", errorstring);
-                       BUG();
-               }
-               if (cqr->callback != NULL)
-                       (callback)(cqr, callback_data);
-               if (block)
+                       __dasd_process_cqr(device, cqr);
                        spin_unlock_bh(&block->queue_lock);
+               }
        }
 }
 
@@ -3041,7 +2982,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;
-       *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
@@ -3072,7 +3012,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        unsigned long flags;
        int rc = 0;
 
-       cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;
 
@@ -3174,7 +3114,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
        int rc;
 
        block->tag_set.ops = &dasd_mq_ops;
-       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
+       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
        block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
        block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
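
This tag-set change is the other half of the allocator rework: the blk-mq pdu now is the struct dasd_ccw_req rather than a pointer to one, which is why do_dasd_request() no longer stores a cqr pointer in the pdu and dasd_times_out() fetches the cqr directly:

	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);

	/* later, e.g. in dasd_times_out() */
	struct dasd_ccw_req *cqr = blk_mq_rq_to_pdu(req);
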
@@ -4038,7 +3978,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
        struct ccw1 *ccw;
        unsigned long *idaw;
 
-       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+                                  NULL);
 
        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed*/
index 5e963fe..e36a114 100644 (file)
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
        int rc;
        unsigned long flags;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
-       dasd_kfree_request(cqr, cqr->memdev);
+       dasd_sfree_request(cqr, cqr->memdev);
        return rc;
 }
 
index 131f198..e1fe024 100644 (file)
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Build the request */
        datasize = sizeof(struct dasd_diag_req) +
                count*sizeof(struct dasd_diag_bio);
-       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
index be208e7..bbf95b7 100644 (file)
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
        }
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
                                   0, /* use rcd_buf as data ara */
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_features)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
                                "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
                                  sizeof(struct dasd_psf_ssc_data),
-                                 device);
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 
        cplength = 8;
        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
         */
        itcw_size = itcw_calc_size(0, count, 0);
 
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
        cplength += count;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                 startdev);
+                                  startdev, NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        }
        /* Allocate the format ccw request. */
        fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, NULL);
        if (IS_ERR(fcp))
                return fcp;
 
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        }
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 
        /* Allocate the ccw request. */
        itcw_size = itcw_calc_size(0, ctidaw, 0);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 
        useglobal = 0;
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-                                  sizeof(struct dasd_snid_data), device);
+                                  sizeof(struct dasd_snid_data), device,
+                                  NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_perf_stats_t)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
        psf1 = psf_data[1];
 
        /* setup CCWs for PSF + RSSD */
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                        "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_messages)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   sizeof(struct dasd_psf_prssd_data) + 1,
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
        int rc;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
-                                 sizeof(struct dasd_psf_cuir_response),
-                                 device);
+                                  sizeof(struct dasd_psf_cuir_response),
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
index 0af8c52..6ef8714 100644
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
        if (rc)
                goto out;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-                                  SNSS_DATA_SIZE, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+                                  SNSS_DATA_SIZE, device, NULL);
        if (IS_ERR(cqr)) {
                rc = -ENOMEM;
                cqr = NULL;
@@ -505,7 +505,7 @@ out:
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        if (cqr)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 
        return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
index a6b132f..56007a3 100644
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
        datasize = sizeof(struct DE_fba_data) +
                nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
                datasize += (count - 1)*sizeof(struct LO_fba_data);
        }
        /* Allocate the ccw request. */
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
index 96709b1..976b6bd 100644
@@ -158,40 +158,33 @@ do { \
 
 struct dasd_ccw_req {
        unsigned int magic;             /* Eye catcher */
+       int intrc;                      /* internal error, e.g. from start_IO */
        struct list_head devlist;       /* for dasd_device request queue */
        struct list_head blocklist;     /* for dasd_block request queue */
-
-       /* Where to execute what... */
        struct dasd_block *block;       /* the originating block device */
        struct dasd_device *memdev;     /* the device used to allocate this */
        struct dasd_device *startdev;   /* device the request is started on */
        struct dasd_device *basedev;    /* base device if no block->base */
        void *cpaddr;                   /* address of ccw or tcw */
+       short retries;                  /* A retry counter */
        unsigned char cpmode;           /* 0 = cmd mode, 1 = itcw */
        char status;                    /* status of this request */
-       short retries;                  /* A retry counter */
+       char lpm;                       /* logical path mask */
        unsigned long flags;            /* flags of this request */
        struct dasd_queue *dq;
-
-       /* ... and how */
        unsigned long starttime;        /* jiffies time of request start */
        unsigned long expires;          /* expiration period in jiffies */
-       char lpm;                       /* logical path mask */
        void *data;                     /* pointer to data area */
-
-       /* these are important for recovering erroneous requests          */
-       int intrc;                      /* internal error, e.g. from start_IO */
        struct irb irb;                 /* device status in case of an error */
        struct dasd_ccw_req *refers;    /* ERP-chain queueing. */
        void *function;                 /* originating ERP action */
+       void *mem_chunk;
 
-       /* these are for statistics only */
        unsigned long buildclk;         /* TOD-clock of request generation */
        unsigned long startclk;         /* TOD-clock of request start */
        unsigned long stopclk;          /* TOD-clock of request interrupt */
        unsigned long endclk;           /* TOD-clock of request termination */
 
-        /* Callback that is called after reaching final status. */
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
 };
@@ -714,19 +707,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-       return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
 
index a070ef0..f230516 100644
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
index dce92b2..dbe7c7a 100644
 #define CCWCHAIN_LEN_MAX       256
 
 struct pfn_array {
+       /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
+       /* Array that stores PFNs of the pages that need to be pinned. */
        unsigned long           *pa_iova_pfn;
+       /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
+       /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
 
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields in
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-       int i, ret;
-
-       if (pa->pa_nr <= 0) {
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-       for (i = 1; i < pa->pa_nr; i++)
-               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-       if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-       pa->pa_nr = 0;
-       kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
 {
-       int ret = 0;
+       int i, ret = 0;
 
        if (!len)
                return 0;
 
-       if (pa->pa_nr)
+       if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;
 
        pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-       ret = pfn_array_pin(pa, mdev);
+       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+       for (i = 1; i < pa->pa_nr; i++)
+               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-       if (ret > 0)
-               return ret;
-       else if (!ret)
+       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+       if (ret < 0) {
+               goto err_out;
+       } else if (ret > 0 && ret != pa->pa_nr) {
+               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
+               goto err_out;
+       }
 
+       return ret;
+
+err_out:
+       pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
+       pa->pa_iova_pfn = NULL;
 
        return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+       pa->pa_nr = 0;
+       kfree(pa->pa_iova_pfn);
+}
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
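
After this rework pfn_array_alloc_pin() is the single entry point: it expects a zeroed pfn_array, allocates the PFN arrays, computes the IOVA PFNs, pins the pages, and on any failure frees and resets the structure itself. A hedged usage sketch (mdev, iova and len are assumed to be in scope):

/* Sketch of the contract documented above: the caller only needs
 * pfn_array_unpin_free() on the success path.
 */
struct pfn_array pa = {};
int ret;

ret = pfn_array_alloc_pin(&pa, mdev, iova, len);
if (ret < 0)
        return ret;     /* pa was already cleaned up internally */

/* ... use pa.pa_pfn[0 .. pa.pa_nr - 1] ... */

pfn_array_unpin_free(&pa, mdev);
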
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for the absence of not-yet-supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
        do {
                cnt++;
 
+               /*
+                * As we don't want to fail direct addressing even if the
+                * orb specified one of the unsupported formats, we defer
+                * checking for IDAWs in unsupported formats to here.
+                */
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+                       return -EOPNOTSUPP;
+
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
 
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
-       int idaw_nr;
+       int ret;
 
        ccw = chain->ch_ccw + idx;
 
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
         * needed when translating a direct ccw to an idal ccw.
         */
        pat = chain->ch_pat + idx;
-       if (pfn_array_table_init(pat, 1))
-               return -ENOMEM;
-       idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
-                                     ccw->cda, ccw->count);
-       if (idaw_nr < 0)
-               return idaw_nr;
+       ret = pfn_array_table_init(pat, 1);
+       if (ret)
+               goto out_init;
+
+       ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+       if (ret < 0)
+               goto out_init;
 
        /* Translate this direct ccw to an idal ccw. */
-       idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+       idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
-               pfn_array_table_unpin_free(pat, cp->mdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unpin;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        pfn_array_table_idal_create_words(pat, idaws);
 
        return 0;
+
+out_unpin:
+       pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
+       return ret;
 }
 
 static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
-               return ret;
+               goto out_init;
 
        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
        kfree(idaws);
 out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
        return ret;
 }
 
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        /*
         * XXX:
         * Only support prefetch enable mode now.
-        * Only support 64bit addressing idal.
-        * Only support 4k IDAW.
         */
-       if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+       if (!orb->cmd.pfch)
                return -EOPNOTSUPP;
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);
+       /* It is safe to force this: if c64 is not set but IDALs are
+        * used, ccwchain_calc_length returns an error.
+        */
+       cp->orb.cmd.c64 = 1;
 
        return ret;
 }
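
Taken together, these hunks split the ORB validation into stages: cp_init() still rejects non-prefetch ORBs up front, while the 64-bit/4k IDAW-format restriction moves into ccwchain_calc_length() and is enforced per CCW, only when an IDAL is actually used; afterwards c64 can be forced so the rest of the translation may assume the 64-bit format. A condensed restatement of that flow (not literal kernel code):

/* Stage 1, cp_init(): prefetch is still mandatory. */
if (!orb->cmd.pfch)
        return -EOPNOTSUPP;

/* Stage 2, ccwchain_calc_length(), per CCW: reject IDALs in
 * unsupported formats instead of failing the whole ORB early. */
if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
        return -EOPNOTSUPP;

/* Stage 3, back in cp_init(): safe, IDAL users were vetted above. */
cp->orb.cmd.c64 = 1;
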
index ea6a2d0..770fa9c 100644
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
+       int rc = -EAGAIN;
 
        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+               rc = 0;
                goto out_unlock;
        }
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
+       rc = 0;
 
 out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);
 
-       return 0;
+       return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
index 3c80064..797a827 100644
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
        struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
         */
        cio_disable_subchannel(sch);
 }
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+       return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = &private->io_region;
        struct mdev_device *mdev = private->mdev;
+       char *errstr = "request";
 
        private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
+                       errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
-               if (io_region->ret_code)
+               if (io_region->ret_code) {
+                       errstr = "cp init";
                        goto err_out;
+               }
 
                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
+                       errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
+                       errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
        private->state = VFIO_CCW_STATE_IDLE;
+       trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+                              io_region->ret_code, errstr);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 0000000..b1da53d
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+       TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+       TP_ARGS(fctl, schid, errno, errstr),
+
+       TP_STRUCT__entry(
+               __field(int, fctl)
+               __field_struct(struct subchannel_id, schid)
+               __field(int, errno)
+               __field(char*, errstr)
+       ),
+
+       TP_fast_assign(
+               __entry->fctl = fctl;
+               __entry->schid = schid;
+               __entry->errno = errno;
+               __entry->errstr = errstr;
+       ),
+
+       TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+                 __entry->schid.cssid,
+                 __entry->schid.ssid,
+                 __entry->schid.sch_no,
+                 __entry->fctl,
+                 __entry->errno,
+                 __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
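
The new header follows the standard kernel tracepoint pattern: TRACE_EVENT() only declares the event, and exactly one compilation unit (vfio_ccw_fsm.c in the hunk above) defines CREATE_TRACE_POINTS before the include to instantiate it; the Makefile's -I$(src) lets define_trace.h resolve TRACE_INCLUDE_PATH. A minimal sketch of how the pieces fit, with the call arguments taken from fsm_io_request():

/* In exactly one .c file; every other user includes the header
 * without CREATE_TRACE_POINTS and only sees the trace_*() wrappers. */
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

/* Firing the event: compiles to a patched-out no-op unless the
 * vfio_ccw:vfio_ccw_io_fctl tracepoint is enabled at runtime. */
trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
                       io_region->ret_code, errstr);
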
index 1754f55..524f9ea 100644
@@ -30,7 +30,7 @@
  * the recommended way for applications to use the coprocessor, and
  * the driver interface is not intended for general use.
  *
- * See Documentation/sparc/oradax/oracle_dax.txt for more details.
+ * See Documentation/sparc/oradax/oracle-dax.txt for more details.
  */
 
 #include <linux/uaccess.h>
index 8974a0f..4b5e250 100644
@@ -1291,7 +1291,7 @@ restore_params:
  *
  * @stream: Soundwire stream
  *
- * Documentation/soundwire/stream.txt explains this API in detail
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
  */
 int sdw_prepare_stream(struct sdw_stream_runtime *stream)
 {
@@ -1348,7 +1348,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
  *
  * @stream: Soundwire stream
  *
- * Documentation/soundwire/stream.txt explains this API in detail
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
  */
 int sdw_enable_stream(struct sdw_stream_runtime *stream)
 {
@@ -1400,7 +1400,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
  *
  * @stream: Soundwire stream
  *
- * Documentation/soundwire/stream.txt explains this API in detail
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
  */
 int sdw_disable_stream(struct sdw_stream_runtime *stream)
 {
@@ -1456,7 +1456,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
  *
  * @stream: Soundwire stream
  *
- * Documentation/soundwire/stream.txt explains this API in detail
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
  */
 int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
 {
index 0ba6771..72ba9da 100644
@@ -11,7 +11,7 @@ pool management for network interfaces.
 This document provides an overview the Linux DPIO driver, its
 subcomponents, and its APIs.
 
-See Documentation/dpaa2/overview.txt for a general overview of DPAA2
+See Documentation/networking/dpaa2/overview.rst for a general overview of DPAA2
 and the general DPAA2 driver architecture in Linux.
 
 Driver Overview
index 051f85d..6bee2a2 100644
@@ -3,7 +3,7 @@ TODO:
 From the initial code review:
 
 The main thing you need to do is to implement all the controls using the
-control framework (see Documentation/video4linux/v4l2-controls.txt).
+control framework (see Documentation/media/kapi/v4l2-controls.rst).
 Most drivers are by now converted to the control framework, so you will
 find many examples of how to do this in drivers/media/radio.
 
index 63df5de..34a1813 100644
@@ -7,7 +7,7 @@ config VIDEO_ZORAN
          36057/36067 PCI controller chipset. This includes the Iomega
          Buz, Pinnacle DC10+ and the Linux Media Labs LML33. There is
          a driver homepage at <http://mjpeg.sf.net/driver-zoran/>. For
-         more information, check <file:Documentation/video4linux/Zoran>.
+         more information, check <file:Documentation/media/v4l-drivers/zoran.rst>.
 
          To compile this driver as a module, choose M here: the
          module will be called zr36067.
index d942542..591a13a 100644
@@ -1437,7 +1437,7 @@ config FB_SIS_315
 
 config FB_VIA
        tristate "VIA UniChrome (Pro) and Chrome9 display support"
-       depends on FB && PCI && X86 && GPIOLIB && I2C
+       depends on FB && PCI && GPIOLIB && I2C && (X86 || COMPILE_TEST)
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -1888,7 +1888,6 @@ config FB_W100
 config FB_SH_MOBILE_LCDC
        tristate "SuperH Mobile LCDC framebuffer support"
        depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
-       depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
@@ -2253,39 +2252,6 @@ config FB_BROADSHEET
          and could also have been called by other names when coupled with
          a bridge adapter.
 
-config FB_AUO_K190X
-       tristate "AUO-K190X EPD controller support"
-       depends on FB
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
-       select FB_DEFERRED_IO
-       help
-         Provides support for epaper controllers from the K190X series
-         of AUO. These controllers can be used to drive epaper displays
-         from Sipix.
-
-         This option enables the common support, shared by the individual
-         controller drivers. You will also have to enable the driver
-         for the controller type used in your device.
-
-config FB_AUO_K1900
-       tristate "AUO-K1900 EPD controller support"
-       depends on FB && FB_AUO_K190X
-       help
-         This driver implements support for the AUO K1900 epd-controller.
-         This controller can drive Sipix epaper displays but can only do
-         serial updates, reducing the number of possible frames per second.
-
-config FB_AUO_K1901
-       tristate "AUO-K1901 EPD controller support"
-       depends on FB && FB_AUO_K190X
-       help
-         This driver implements support for the AUO K1901 epd-controller.
-         This controller can drive Sipix epaper displays and supports
-         concurrent updates, making higher frames per second possible.
-
 config FB_JZ4740
        tristate "JZ4740 LCD framebuffer support"
        depends on FB && MACH_JZ4740
@@ -2346,18 +2312,6 @@ source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
 
-config FB_SH_MOBILE_MERAM
-       tristate "SuperH Mobile MERAM read ahead support"
-       depends on (SUPERH || ARCH_SHMOBILE)
-       select GENERIC_ALLOCATOR
-       ---help---
-         Enable MERAM support for the SuperH controller.
-
-         This will allow for caching of the framebuffer to provide more
-         reliable access under heavy main memory bus traffic situations.
-         Up to 4 memory channels can be configured, allowing 4 RGB or
-         2 YCbCr framebuffers to be configured.
-
 config FB_SSD1307
        tristate "Solomon SSD1307 framebuffer support"
        depends on FB && I2C
index 55282a2..13c9003 100644
@@ -100,9 +100,6 @@ obj-$(CONFIG_FB_PMAGB_B)      += pmagb-b-fb.o
 obj-$(CONFIG_FB_MAXINE)                  += maxinefb.o
 obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_BROADSHEET)       += broadsheetfb.o
-obj-$(CONFIG_FB_AUO_K190X)       += auo_k190x.o
-obj-$(CONFIG_FB_AUO_K1900)       += auo_k1900fb.o
-obj-$(CONFIG_FB_AUO_K1901)       += auo_k1901fb.o
 obj-$(CONFIG_FB_S1D13XXX)        += s1d13xxxfb.o
 obj-$(CONFIG_FB_SH7760)                  += sh7760fb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
@@ -116,7 +113,6 @@ obj-$(CONFIG_FB_SM501)            += sm501fb.o
 obj-$(CONFIG_FB_UDL)             += udlfb.o
 obj-$(CONFIG_FB_SMSCUFX)         += smscufx.o
 obj-$(CONFIG_FB_XILINX)           += xilinxfb.o
-obj-$(CONFIG_FB_SH_MOBILE_MERAM)  += sh_mobile_meram.o
 obj-$(CONFIG_FB_SH_MOBILE_LCDC)          += sh_mobile_lcdcfb.o
 obj-$(CONFIG_FB_OMAP)             += omap/
 obj-y                             += omap2/
index 09b0e55..6cc4686 100644
@@ -2442,7 +2442,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
                (void)aty_ld_pll(POWER_MANAGEMENT);
                aty_st_le32(BUS_CNTL1, 0x00000010);
                aty_st_le32(MEM_POWER_MISC, 0x0c830000);
-               mdelay(100);
+               msleep(100);
 
                /* Switch PCI power management to D2 */
                pci_set_power_state(pdev, PCI_D2);
index 7137c12..e695adb 100644
@@ -2678,17 +2678,17 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
                 * it, we'll restore the dynamic clocks state on wakeup
                 */
                radeon_pm_disable_dynamic_mode(rinfo);
-               mdelay(50);
+               msleep(50);
                radeon_pm_save_regs(rinfo, 1);
 
                if (rinfo->is_mobility && !(rinfo->pm_mode & radeon_pm_d2)) {
                        /* Switch off LVDS interface */
-                       mdelay(1);
+                       usleep_range(1000, 2000);
                        OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_BL_MOD_EN));
-                       mdelay(1);
+                       usleep_range(1000, 2000);
                        OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_EN | LVDS_ON));
                        OUTREG(LVDS_PLL_CNTL, (INREG(LVDS_PLL_CNTL) & ~30000) | 0x20000);
-                       mdelay(20);
+                       msleep(20);
                        OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON));
                }
                pci_disable_device(pdev);
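
The aty128fb and radeonfb hunks convert busy-wait mdelay() calls in the suspend paths, which run in process context, to sleeping delays: usleep_range() for the short waits where msleep(1) could oversleep by a jiffy, msleep() for the longer ones. A rule-of-thumb sketch of the selection, following the usual timers-howto guidance (thresholds are conventions, not hard limits):

/* Atomic context leaves no choice: busy-wait. */
udelay(5);

/* Process context, roughly 10us..20ms: hrtimer-backed sleep with
 * explicit slack, avoids jiffy-granularity oversleep. */
usleep_range(1000, 2000);       /* replaces mdelay(1) above */

/* Process context, longer than ~20ms: jiffy-resolution sleep. */
msleep(100);                    /* replaces mdelay(100) above */
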
index d555a78..0adf068 100644
@@ -464,7 +464,7 @@ static int au1100fb_drv_probe(struct platform_device *dev)
                                            PAGE_ALIGN(fbdev->fb_len),
                                            &fbdev->fb_phys, GFP_KERNEL);
        if (!fbdev->fb_mem) {
-               print_err("fail to allocate frambuffer (size: %dK))",
+               print_err("fail to allocate framebuffer (size: %dK))",
                          fbdev->fb_len / 1024);
                return -ENOMEM;
        }
index 87d5a62..3872cce 100644
@@ -1696,7 +1696,7 @@ static int au1200fb_drv_probe(struct platform_device *dev)
                                &fbdev->fb_phys, GFP_KERNEL,
                                DMA_ATTR_NON_CONSISTENT);
                if (!fbdev->fb_mem) {
-                       print_err("fail to allocate frambuffer (size: %dK))",
+                       print_err("fail to allocate framebuffer (size: %dK))",
                                  fbdev->fb_len / 1024);
                        ret = -ENOMEM;
                        goto failed;
diff --git a/drivers/video/fbdev/auo_k1900fb.c b/drivers/video/fbdev/auo_k1900fb.c
deleted file mode 100644
index 7637c60..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * auok190xfb.c -- FB driver for AUO-K1900 controllers
- *
- * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * based on broadsheetfb.c
- *
- * Copyright (C) 2008, Jaya Kumar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
- *
- * This driver is written to be used with the AUO-K1900 display controller.
- *
- * It is intended to be architecture independent. A board specific driver
- * must be used to perform all the physical IO interactions.
- *
- * The controller supports different update modes:
- * mode0+1 16 step gray (4bit)
- * mode2 4 step gray (2bit) - FIXME: add strange refresh
- * mode3 2 step gray (1bit) - FIXME: add strange refresh
- * mode4 handwriting mode (strange behaviour)
- * mode5 automatic selection of update mode
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
-#include <linux/firmware.h>
-#include <linux/gpio.h>
-#include <linux/pm_runtime.h>
-
-#include <video/auo_k190xfb.h>
-
-#include "auo_k190x.h"
-
-/*
- * AUO-K1900 specific commands
- */
-
-#define AUOK1900_CMD_PARTIALDISP       0x1001
-#define AUOK1900_CMD_ROTATION          0x1006
-#define AUOK1900_CMD_LUT_STOP          0x1009
-
-#define AUOK1900_INIT_TEMP_AVERAGE     (1 << 13)
-#define AUOK1900_INIT_ROTATE(_x)       ((_x & 0x3) << 10)
-#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
-
-static void auok1900_init(struct auok190xfb_par *par)
-{
-       struct device *dev = par->info->device;
-       struct auok190x_board *board = par->board;
-       u16 init_param = 0;
-
-       pm_runtime_get_sync(dev);
-
-       init_param |= AUOK1900_INIT_TEMP_AVERAGE;
-       init_param |= AUOK1900_INIT_ROTATE(par->rotation);
-       init_param |= AUOK190X_INIT_INVERSE_WHITE;
-       init_param |= AUOK190X_INIT_FORMAT0;
-       init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
-       init_param |= AUOK190X_INIT_SHIFT_RIGHT;
-
-       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
-
-       /* let the controller finish */
-       board->wait_for_rdy(par);
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-}
-
-static void auok1900_update_region(struct auok190xfb_par *par, int mode,
-                                               u16 y1, u16 y2)
-{
-       struct device *dev = par->info->device;
-       unsigned char *buf = (unsigned char *)par->info->screen_base;
-       int xres = par->info->var.xres;
-       int line_length = par->info->fix.line_length;
-       u16 args[4];
-
-       pm_runtime_get_sync(dev);
-
-       mutex_lock(&(par->io_lock));
-
-       /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
-       y1 &= 0xfffe;
-       y2 &= 0xfffe;
-
-       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
-               1, y1+1, xres, y2-y1, mode);
-
-       /* to FIX handle different partial update modes */
-       args[0] = mode | 1;
-       args[1] = y1 + 1;
-       args[2] = xres;
-       args[3] = y2 - y1;
-       buf += y1 * line_length;
-       auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
-                                    ((y2 - y1) * line_length)/2, (u16 *) buf);
-       auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
-
-       par->update_cnt++;
-
-       mutex_unlock(&(par->io_lock));
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-}
-
-static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
-                                               u16 y1, u16 y2)
-{
-       int mode;
-
-       if (par->update_mode < 0) {
-               mode = AUOK190X_UPDATE_MODE(1);
-               par->last_mode = -1;
-       } else {
-               mode = AUOK190X_UPDATE_MODE(par->update_mode);
-               par->last_mode = par->update_mode;
-       }
-
-       if (par->flash)
-               mode |= AUOK190X_UPDATE_NONFLASH;
-
-       auok1900_update_region(par, mode, y1, y2);
-}
-
-static void auok1900fb_dpy_update(struct auok190xfb_par *par)
-{
-       int mode;
-
-       if (par->update_mode < 0) {
-               mode = AUOK190X_UPDATE_MODE(0);
-               par->last_mode = -1;
-       } else {
-               mode = AUOK190X_UPDATE_MODE(par->update_mode);
-               par->last_mode = par->update_mode;
-       }
-
-       if (par->flash)
-               mode |= AUOK190X_UPDATE_NONFLASH;
-
-       auok1900_update_region(par, mode, 0, par->info->var.yres);
-       par->update_cnt = 0;
-}
-
-static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
-{
-       return (par->update_cnt > 10);
-}
-
-static int auok1900fb_probe(struct platform_device *pdev)
-{
-       struct auok190x_init_data init;
-       struct auok190x_board *board;
-
-       /* pick up board specific routines */
-       board = pdev->dev.platform_data;
-       if (!board)
-               return -EINVAL;
-
-       /* fill temporary init struct for common init */
-       init.id = "auo_k1900fb";
-       init.board = board;
-       init.update_partial = auok1900fb_dpy_update_pages;
-       init.update_all = auok1900fb_dpy_update;
-       init.need_refresh = auok1900fb_need_refresh;
-       init.init = auok1900_init;
-
-       return auok190x_common_probe(pdev, &init);
-}
-
-static int auok1900fb_remove(struct platform_device *pdev)
-{
-       return auok190x_common_remove(pdev);
-}
-
-static struct platform_driver auok1900fb_driver = {
-       .probe  = auok1900fb_probe,
-       .remove = auok1900fb_remove,
-       .driver = {
-               .name   = "auo_k1900fb",
-               .pm = &auok190x_pm,
-       },
-};
-module_platform_driver(auok1900fb_driver);
-
-MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/auo_k1901fb.c b/drivers/video/fbdev/auo_k1901fb.c
deleted file mode 100644 (file)
index 681fe61..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * auok190xfb.c -- FB driver for AUO-K1901 controllers
- *
- * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * based on broadsheetfb.c
- *
- * Copyright (C) 2008, Jaya Kumar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
- *
- * This driver is written to be used with the AUO-K1901 display controller.
- *
- * It is intended to be architecture independent. A board specific driver
- * must be used to perform all the physical IO interactions.
- *
- * The controller supports different update modes:
- * mode0+1 16 step gray (4bit)
- * mode2+3 4 step gray (2bit)
- * mode4+5 2 step gray (1bit)
- * - mode4 is described as "without LUT"
- * mode7 automatic selection of update mode
- *
- * The most interesting difference to the K1900 is the ability to do screen
- * updates in an asynchronous fashion. Where the K1900 needs to wait for the
- * current update to complete, the K1901 can process later updates already.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
-#include <linux/firmware.h>
-#include <linux/gpio.h>
-#include <linux/pm_runtime.h>
-
-#include <video/auo_k190xfb.h>
-
-#include "auo_k190x.h"
-
-/*
- * AUO-K1901 specific commands
- */
-
-#define AUOK1901_CMD_LUT_INTERFACE     0x0005
-#define AUOK1901_CMD_DMA_START         0x1001
-#define AUOK1901_CMD_CURSOR_START      0x1007
-#define AUOK1901_CMD_CURSOR_STOP       AUOK190X_CMD_DATA_STOP
-#define AUOK1901_CMD_DDMA_START                0x1009
-
-#define AUOK1901_INIT_GATE_PULSE_LOW   (0 << 14)
-#define AUOK1901_INIT_GATE_PULSE_HIGH  (1 << 14)
-#define AUOK1901_INIT_SINGLE_GATE      (0 << 13)
-#define AUOK1901_INIT_DOUBLE_GATE      (1 << 13)
-
-/* Bits to pixels
- *   Mode      15-12   11-8    7-4     3-0
- *   format2   2       T       1       T
- *   format3   1       T       2       T
- *   format4   T       2       T       1
- *   format5   T       1       T       2
- *
- *   halftone modes:
- *   format6   2       2       1       1
- *   format7   1       1       2       2
- */
-#define AUOK1901_INIT_FORMAT2          (1 << 7)
-#define AUOK1901_INIT_FORMAT3          ((1 << 7) | (1 << 6))
-#define AUOK1901_INIT_FORMAT4          (1 << 8)
-#define AUOK1901_INIT_FORMAT5          ((1 << 8) | (1 << 6))
-#define AUOK1901_INIT_FORMAT6          ((1 << 8) | (1 << 7))
-#define AUOK1901_INIT_FORMAT7          ((1 << 8) | (1 << 7) | (1 << 6))
-
-/* res[4] to bit 10
- * res[3-0] to bits 5-2
- */
-#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
-                                        | ((_res & 0xf) << 2))
-
-/*
- * portrait / landscape orientation in AUOK1901_CMD_DMA_START
- */
-#define AUOK1901_DMA_ROTATE90(_rot)            ((_rot & 1) << 13)
-
-/*
- * equivalent to 1 << 11, needs the ~ to have same rotation like K1900
- */
-#define AUOK1901_DDMA_ROTATE180(_rot)          ((~_rot & 2) << 10)
-
-static void auok1901_init(struct auok190xfb_par *par)
-{
-       struct device *dev = par->info->device;
-       struct auok190x_board *board = par->board;
-       u16 init_param = 0;
-
-       pm_runtime_get_sync(dev);
-
-       init_param |= AUOK190X_INIT_INVERSE_WHITE;
-       init_param |= AUOK190X_INIT_FORMAT0;
-       init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
-       init_param |= AUOK190X_INIT_SHIFT_LEFT;
-
-       auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
-
-       /* let the controller finish */
-       board->wait_for_rdy(par);
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-}
-
-static void auok1901_update_region(struct auok190xfb_par *par, int mode,
-                                               u16 y1, u16 y2)
-{
-       struct device *dev = par->info->device;
-       unsigned char *buf = (unsigned char *)par->info->screen_base;
-       int xres = par->info->var.xres;
-       int line_length = par->info->fix.line_length;
-       u16 args[5];
-
-       pm_runtime_get_sync(dev);
-
-       mutex_lock(&(par->io_lock));
-
-       /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
-       y1 &= 0xfffe;
-       y2 &= 0xfffe;
-
-       dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
-               1, y1+1, xres, y2-y1, mode);
-
-       /* K1901: first transfer the region data */
-       args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
-       args[1] = y1 + 1;
-       args[2] = xres;
-       args[3] = y2 - y1;
-       buf += y1 * line_length;
-       auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
-                                           args, ((y2 - y1) * line_length)/2,
-                                           (u16 *) buf);
-       auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
-
-       /* K1901: second tell the controller to update the region with mode */
-       args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
-       args[1] = 1;
-       args[2] = y1 + 1;
-       args[3] = xres;
-       args[4] = y2 - y1;
-       auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
-
-       par->update_cnt++;
-
-       mutex_unlock(&(par->io_lock));
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-}
-
-static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
-                                               u16 y1, u16 y2)
-{
-       int mode;
-
-       if (par->update_mode < 0) {
-               mode = AUOK190X_UPDATE_MODE(1);
-               par->last_mode = -1;
-       } else {
-               mode = AUOK190X_UPDATE_MODE(par->update_mode);
-               par->last_mode = par->update_mode;
-       }
-
-       if (par->flash)
-               mode |= AUOK190X_UPDATE_NONFLASH;
-
-       auok1901_update_region(par, mode, y1, y2);
-}
-
-static void auok1901fb_dpy_update(struct auok190xfb_par *par)
-{
-       int mode;
-
-       /* When doing full updates, wait for the controller to be ready
-        * This will hopefully catch some hangs of the K1901
-        */
-       par->board->wait_for_rdy(par);
-
-       if (par->update_mode < 0) {
-               mode = AUOK190X_UPDATE_MODE(0);
-               par->last_mode = -1;
-       } else {
-               mode = AUOK190X_UPDATE_MODE(par->update_mode);
-               par->last_mode = par->update_mode;
-       }
-
-       if (par->flash)
-               mode |= AUOK190X_UPDATE_NONFLASH;
-
-       auok1901_update_region(par, mode, 0, par->info->var.yres);
-       par->update_cnt = 0;
-}
-
-static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
-{
-       return (par->update_cnt > 10);
-}
-
-static int auok1901fb_probe(struct platform_device *pdev)
-{
-       struct auok190x_init_data init;
-       struct auok190x_board *board;
-
-       /* pick up board specific routines */
-       board = pdev->dev.platform_data;
-       if (!board)
-               return -EINVAL;
-
-       /* fill temporary init struct for common init */
-       init.id = "auo_k1901fb";
-       init.board = board;
-       init.update_partial = auok1901fb_dpy_update_pages;
-       init.update_all = auok1901fb_dpy_update;
-       init.need_refresh = auok1901fb_need_refresh;
-       init.init = auok1901_init;
-
-       return auok190x_common_probe(pdev, &init);
-}
-
-static int auok1901fb_remove(struct platform_device *pdev)
-{
-       return auok190x_common_remove(pdev);
-}
-
-static struct platform_driver auok1901fb_driver = {
-       .probe  = auok1901fb_probe,
-       .remove = auok1901fb_remove,
-       .driver = {
-               .name   = "auo_k1901fb",
-               .pm = &auok190x_pm,
-       },
-};
-module_platform_driver(auok1901fb_driver);
-
-MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c
deleted file mode 100644 (file)
index 9d24d1b..0000000
+++ /dev/null
@@ -1,1195 +0,0 @@
-/*
- * Common code for AUO-K190X framebuffer drivers
- *
- * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/sched/mm.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/auo_k190xfb.h>
-
-#include "auo_k190x.h"
-
-struct panel_info {
-       int w;
-       int h;
-};
-
-/* table of panel specific parameters to be indexed into by the board drivers */
-static struct panel_info panel_table[] = {
-       /* standard 6" */
-       [AUOK190X_RESOLUTION_800_600] = {
-               .w = 800,
-               .h = 600,
-       },
-       /* standard 9" */
-       [AUOK190X_RESOLUTION_1024_768] = {
-               .w = 1024,
-               .h = 768,
-       },
-       [AUOK190X_RESOLUTION_600_800] = {
-               .w = 600,
-               .h = 800,
-       },
-       [AUOK190X_RESOLUTION_768_1024] = {
-               .w = 768,
-               .h = 1024,
-       },
-};
-
-/*
- * private I80 interface to the board driver
- */
-
-static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
-{
-       par->board->set_ctl(par, AUOK190X_I80_WR, 0);
-       par->board->set_hdb(par, data);
-       par->board->set_ctl(par, AUOK190X_I80_WR, 1);
-}
-
-static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
-{
-       par->board->set_ctl(par, AUOK190X_I80_DC, 0);
-       auok190x_issue_data(par, data);
-       par->board->set_ctl(par, AUOK190X_I80_DC, 1);
-}
-
-/**
- * Conversion of 16bit color to 4bit grayscale
- * does roughly (0.3 * R + 0.6 G + 0.1 B) / 2
- */
-static inline int rgb565_to_gray4(u16 data, struct fb_var_screeninfo *var)
-{
-       return ((((data & 0xF800) >> var->red.offset) * 77 +
-                ((data & 0x07E0) >> (var->green.offset + 1)) * 151 +
-                ((data & 0x1F) >> var->blue.offset) * 28) >> 8 >> 1);
-}
-
-static int auok190x_issue_pixels_rgb565(struct auok190xfb_par *par, int size,
-                                       u16 *data)
-{
-       struct fb_var_screeninfo *var = &par->info->var;
-       struct device *dev = par->info->device;
-       int i;
-       u16 tmp;
-
-       if (size & 7) {
-               dev_err(dev, "issue_pixels: size %d must be a multiple of 8\n",
-                       size);
-               return -EINVAL;
-       }
-
-       for (i = 0; i < (size >> 2); i++) {
-               par->board->set_ctl(par, AUOK190X_I80_WR, 0);
-
-               tmp  = (rgb565_to_gray4(data[4*i], var) & 0x000F);
-               tmp |= (rgb565_to_gray4(data[4*i+1], var) << 4) & 0x00F0;
-               tmp |= (rgb565_to_gray4(data[4*i+2], var) << 8) & 0x0F00;
-               tmp |= (rgb565_to_gray4(data[4*i+3], var) << 12) & 0xF000;
-
-               par->board->set_hdb(par, tmp);
-               par->board->set_ctl(par, AUOK190X_I80_WR, 1);
-       }
-
-       return 0;
-}
-
-static int auok190x_issue_pixels_gray8(struct auok190xfb_par *par, int size,
-                                      u16 *data)
-{
-       struct device *dev = par->info->device;
-       int i;
-       u16 tmp;
-
-       if (size & 3) {
-               dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
-                       size);
-               return -EINVAL;
-       }
-
-       for (i = 0; i < (size >> 1); i++) {
-               par->board->set_ctl(par, AUOK190X_I80_WR, 0);
-
-               /* simple reduction of 8bit staticgray to 4bit gray
-                * combines 4 * 4bit pixel values into a 16bit value
-                */
-               tmp  = (data[2*i] & 0xF0) >> 4;
-               tmp |= (data[2*i] & 0xF000) >> 8;
-               tmp |= (data[2*i+1] & 0xF0) << 4;
-               tmp |= (data[2*i+1] & 0xF000);
-
-               par->board->set_hdb(par, tmp);
-               par->board->set_ctl(par, AUOK190X_I80_WR, 1);
-       }
-
-       return 0;
-}
-
-static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
-                                u16 *data)
-{
-       struct fb_info *info = par->info;
-       struct device *dev = par->info->device;
-
-       if (info->var.bits_per_pixel == 8 && info->var.grayscale)
-               auok190x_issue_pixels_gray8(par, size, data);
-       else if (info->var.bits_per_pixel == 16)
-               auok190x_issue_pixels_rgb565(par, size, data);
-       else
-               dev_err(dev, "unsupported color mode (bits: %d, gray: %d)\n",
-                       info->var.bits_per_pixel, info->var.grayscale);
-
-       return 0;
-}
-
-static u16 auok190x_read_data(struct auok190xfb_par *par)
-{
-       u16 data;
-
-       par->board->set_ctl(par, AUOK190X_I80_OE, 0);
-       data = par->board->get_hdb(par);
-       par->board->set_ctl(par, AUOK190X_I80_OE, 1);
-
-       return data;
-}
-
-/*
- * Command interface for the controller drivers
- */
-
-void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
-{
-       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
-       auok190x_issue_cmd(par, data);
-       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
-}
-EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
-
-void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
-                                 int argc, u16 *argv)
-{
-       int i;
-
-       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
-       auok190x_issue_cmd(par, cmd);
-
-       for (i = 0; i < argc; i++)
-               auok190x_issue_data(par, argv[i]);
-       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
-}
-EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
-
-int auok190x_send_command(struct auok190xfb_par *par, u16 data)
-{
-       int ret;
-
-       ret = par->board->wait_for_rdy(par);
-       if (ret)
-               return ret;
-
-       auok190x_send_command_nowait(par, data);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(auok190x_send_command);
-
-int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
-                          int argc, u16 *argv)
-{
-       int ret;
-
-       ret = par->board->wait_for_rdy(par);
-       if (ret)
-               return ret;
-
-       auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
-
-int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
-                          int argc, u16 *argv)
-{
-       int i, ret;
-
-       ret = par->board->wait_for_rdy(par);
-       if (ret)
-               return ret;
-
-       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
-       auok190x_issue_cmd(par, cmd);
-
-       for (i = 0; i < argc; i++)
-               argv[i] = auok190x_read_data(par);
-       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
-
-void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
-                                 int argc, u16 *argv, int size, u16 *data)
-{
-       int i;
-
-       par->board->set_ctl(par, AUOK190X_I80_CS, 0);
-
-       auok190x_issue_cmd(par, cmd);
-
-       for (i = 0; i < argc; i++)
-               auok190x_issue_data(par, argv[i]);
-
-       auok190x_issue_pixels(par, size, data);
-
-       par->board->set_ctl(par, AUOK190X_I80_CS, 1);
-}
-EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
-
-int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
-                                 int argc, u16 *argv, int size, u16 *data)
-{
-       int ret;
-
-       ret = par->board->wait_for_rdy(par);
-       if (ret)
-               return ret;
-
-       auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
-
-/*
- * fbdefio callbacks - common on both controllers.
- */
-
-static void auok190xfb_dpy_first_io(struct fb_info *info)
-{
-       /* tell runtime-pm that we wish to use the device in a short time */
-       pm_runtime_get(info->device);
-}
-
-/* this is called back from the deferred io workqueue */
-static void auok190xfb_dpy_deferred_io(struct fb_info *info,
-                               struct list_head *pagelist)
-{
-       struct fb_deferred_io *fbdefio = info->fbdefio;
-       struct auok190xfb_par *par = info->par;
-       u16 line_length = info->fix.line_length;
-       u16 yres = info->var.yres;
-       u16 y1 = 0, h = 0;
-       int prev_index = -1;
-       struct page *cur;
-       int h_inc;
-       int threshold;
-
-       if (!list_empty(pagelist))
-               /* the device resume should've been requested through first_io,
-                * if the resume did not finish until now, wait for it.
-                */
-               pm_runtime_barrier(info->device);
-       else
-               /* We reached this via the fsync or some other way.
-                * In either case the first_io function did not run,
-                * so we runtime_resume the device here synchronously.
-                */
-               pm_runtime_get_sync(info->device);
-
-       /* Do a full screen update every n updates to prevent
-        * excessive darkening of the Sipix display.
-        * If we do this, there is no need to walk the pages.
-        */
-       if (par->need_refresh(par)) {
-               par->update_all(par);
-               goto out;
-       }
-
-       /* height increment is fixed per page */
-       h_inc = DIV_ROUND_UP(PAGE_SIZE, line_length);
-
-       /* calculate number of pages from pixel height */
-       threshold = par->consecutive_threshold / h_inc;
-       if (threshold < 1)
-               threshold = 1;
-
-       /* walk the written page list and swizzle the data */
-       list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-               if (prev_index < 0) {
-                       /* just starting so assign first page */
-                       y1 = (cur->index << PAGE_SHIFT) / line_length;
-                       h = h_inc;
-               } else if ((cur->index - prev_index) <= threshold) {
-                       /* page is within our threshold for single updates */
-                       h += h_inc * (cur->index - prev_index);
-               } else {
-                       /* page not consecutive, issue previous update first */
-                       par->update_partial(par, y1, y1 + h);
-
-                       /* start over with our non-consecutive page */
-                       y1 = (cur->index << PAGE_SHIFT) / line_length;
-                       h = h_inc;
-               }
-               prev_index = cur->index;
-       }
-
-       /* if we still have any pages to update we do so now */
-       if (h >= yres)
-               /* it's a full screen update, just do it */
-               par->update_all(par);
-       else
-               par->update_partial(par, y1, min((u16) (y1 + h), yres));
-
-out:
-       pm_runtime_mark_last_busy(info->device);
-       pm_runtime_put_autosuspend(info->device);
-}
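
The page walk above maps deferred-io page indices straight to scanlines, which works because the framebuffer is a virtually contiguous vmalloc area. A worked sketch of the arithmetic, assuming an illustrative 600-byte line_length (600 px wide at 8 bpp) and 4 KiB pages:

	/* page 3 starts at byte 3 << PAGE_SHIFT = 12288,
	 * i.e. scanline 12288 / 600 = 20, and one page spans
	 * DIV_ROUND_UP(4096, 600) = 7 scanlines.
	 */
	u16 y1    = (3 << PAGE_SHIFT) / 600;        /* 20 */
	int h_inc = DIV_ROUND_UP(PAGE_SIZE, 600);   /* 7  */
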
-
-/*
- * framebuffer operations
- */
-
-/*
- * this is the slow path from userspace. apps can seek and write to
- * the fb; it's inefficient to do anything less than a full screen draw
- */
-static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       struct auok190xfb_par *par = info->par;
-       unsigned long p = *ppos;
-       void *dst;
-       int err = 0;
-       unsigned long total_size;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return -EPERM;
-
-       total_size = info->fix.smem_len;
-
-       if (p > total_size)
-               return -EFBIG;
-
-       if (count > total_size) {
-               err = -EFBIG;
-               count = total_size;
-       }
-
-       if (count + p > total_size) {
-               if (!err)
-                       err = -ENOSPC;
-
-               count = total_size - p;
-       }
-
-       dst = (void *)(info->screen_base + p);
-
-       if (copy_from_user(dst, buf, count))
-               err = -EFAULT;
-
-       if  (!err)
-               *ppos += count;
-
-       par->update_all(par);
-
-       return (err) ? err : count;
-}
-
-static void auok190xfb_fillrect(struct fb_info *info,
-                                  const struct fb_fillrect *rect)
-{
-       struct auok190xfb_par *par = info->par;
-
-       sys_fillrect(info, rect);
-
-       par->update_all(par);
-}
-
-static void auok190xfb_copyarea(struct fb_info *info,
-                                  const struct fb_copyarea *area)
-{
-       struct auok190xfb_par *par = info->par;
-
-       sys_copyarea(info, area);
-
-       par->update_all(par);
-}
-
-static void auok190xfb_imageblit(struct fb_info *info,
-                               const struct fb_image *image)
-{
-       struct auok190xfb_par *par = info->par;
-
-       sys_imageblit(info, image);
-
-       par->update_all(par);
-}
-
-static int auok190xfb_check_var(struct fb_var_screeninfo *var,
-                                  struct fb_info *info)
-{
-       struct device *dev = info->device;
-       struct auok190xfb_par *par = info->par;
-       struct panel_info *panel = &panel_table[par->resolution];
-       int size;
-
-       /*
-        * Color depth
-        */
-
-       if (var->bits_per_pixel == 8 && var->grayscale == 1) {
-               /*
-                * For 8-bit grayscale, the R, G, and B offsets are equal.
-                */
-               var->red.length = 8;
-               var->red.offset = 0;
-               var->red.msb_right = 0;
-
-               var->green.length = 8;
-               var->green.offset = 0;
-               var->green.msb_right = 0;
-
-               var->blue.length = 8;
-               var->blue.offset = 0;
-               var->blue.msb_right = 0;
-
-               var->transp.length = 0;
-               var->transp.offset = 0;
-               var->transp.msb_right = 0;
-       } else if (var->bits_per_pixel == 16) {
-               var->red.length = 5;
-               var->red.offset = 11;
-               var->red.msb_right = 0;
-
-               var->green.length = 6;
-               var->green.offset = 5;
-               var->green.msb_right = 0;
-
-               var->blue.length = 5;
-               var->blue.offset = 0;
-               var->blue.msb_right = 0;
-
-               var->transp.length = 0;
-               var->transp.offset = 0;
-               var->transp.msb_right = 0;
-       } else {
-               dev_warn(dev, "unsupported color mode (bits: %d, grayscale: %d)\n",
-                       var->bits_per_pixel, var->grayscale);
-               return -EINVAL;
-       }
-
-       /*
-        * Dimensions
-        */
-
-       switch (var->rotate) {
-       case FB_ROTATE_UR:
-       case FB_ROTATE_UD:
-               var->xres = panel->w;
-               var->yres = panel->h;
-               break;
-       case FB_ROTATE_CW:
-       case FB_ROTATE_CCW:
-               var->xres = panel->h;
-               var->yres = panel->w;
-               break;
-       default:
-               dev_dbg(dev, "Invalid rotation request\n");
-               return -EINVAL;
-       }
-
-       var->xres_virtual = var->xres;
-       var->yres_virtual = var->yres;
-
-       /*
-        *  Memory limit
-        */
-
-       size = var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8;
-       if (size > info->fix.smem_len) {
-               dev_err(dev, "Memory limit exceeded, requested %dK\n",
-                       size >> 10);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static int auok190xfb_set_fix(struct fb_info *info)
-{
-       struct fb_fix_screeninfo *fix = &info->fix;
-       struct fb_var_screeninfo *var = &info->var;
-
-       fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
-
-       fix->type = FB_TYPE_PACKED_PIXELS;
-       fix->accel = FB_ACCEL_NONE;
-       fix->visual = (var->grayscale) ? FB_VISUAL_STATIC_PSEUDOCOLOR
-                                      : FB_VISUAL_TRUECOLOR;
-       fix->xpanstep = 0;
-       fix->ypanstep = 0;
-       fix->ywrapstep = 0;
-
-       return 0;
-}
-
-static int auok190xfb_set_par(struct fb_info *info)
-{
-       struct auok190xfb_par *par = info->par;
-
-       par->rotation = info->var.rotate;
-       auok190xfb_set_fix(info);
-
-       /* reinit the controller to honor the rotation */
-       par->init(par);
-
-       /* wait for init to complete */
-       par->board->wait_for_rdy(par);
-
-       return 0;
-}
-
-static struct fb_ops auok190xfb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_read        = fb_sys_read,
-       .fb_write       = auok190xfb_write,
-       .fb_fillrect    = auok190xfb_fillrect,
-       .fb_copyarea    = auok190xfb_copyarea,
-       .fb_imageblit   = auok190xfb_imageblit,
-       .fb_check_var   = auok190xfb_check_var,
-       .fb_set_par     = auok190xfb_set_par,
-};
-
-/*
- * Controller functions common to both K1900 and K1901
- */
-
-static int auok190x_read_temperature(struct auok190xfb_par *par)
-{
-       struct device *dev = par->info->device;
-       u16 data[4];
-       int temp;
-
-       pm_runtime_get_sync(dev);
-
-       mutex_lock(&(par->io_lock));
-
-       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
-
-       mutex_unlock(&(par->io_lock));
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-
-       /* sanitize and strip off the half-degree bit for now */
-       temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
-
-       /* handle positive and negative temperatures */
-       if (temp >= 201)
-               return (255 - temp + 1) * (-1);
-       else
-               return temp;
-}
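
The controller reports temperature as an unsigned value after the shift, with the upper part of the range wrapping to negative degrees. A worked sketch of the conversion above:

	/* raw 25  ->  25
	 * raw 250 ->  -(255 - 250 + 1) = -6
	 */
	int raw = 250;
	int degc = (raw >= 201) ? -(255 - raw + 1) : raw;	/* -6 */
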
-
-static void auok190x_identify(struct auok190xfb_par *par)
-{
-       struct device *dev = par->info->device;
-       u16 data[4];
-
-       pm_runtime_get_sync(dev);
-
-       mutex_lock(&(par->io_lock));
-
-       auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
-
-       mutex_unlock(&(par->io_lock));
-
-       par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
-
-       par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
-       par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
-       par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
-
-       par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
-       par->lut_version = AUOK190X_VERSION_LUT(data[3]);
-
-       dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
-               par->panel_size_int, par->panel_size_float, par->panel_model,
-               par->epd_type, par->tcon_version, par->lut_version);
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-}
-
-/*
- * Sysfs functions
- */
-
-static ssize_t update_mode_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct fb_info *info = dev_get_drvdata(dev);
-       struct auok190xfb_par *par = info->par;
-
-       return sprintf(buf, "%d\n", par->update_mode);
-}
-
-static ssize_t update_mode_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct fb_info *info = dev_get_drvdata(dev);
-       struct auok190xfb_par *par = info->par;
-       int mode, ret;
-
-       ret = kstrtoint(buf, 10, &mode);
-       if (ret)
-               return ret;
-
-       par->update_mode = mode;
-
-       /* if we enter a better mode, do a full update */
-       if (par->last_mode > 1 && mode < par->last_mode)
-               par->update_all(par);
-
-       return count;
-}
-
-static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct fb_info *info = dev_get_drvdata(dev);
-       struct auok190xfb_par *par = info->par;
-
-       return sprintf(buf, "%d\n", par->flash);
-}
-
-static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct fb_info *info = dev_get_drvdata(dev);
-       struct auok190xfb_par *par = info->par;
-       int flash, ret;
-
-       ret = kstrtoint(buf, 10, &flash);
-       if (ret)
-               return ret;
-
-       if (flash > 0)
-               par->flash = 1;
-       else
-               par->flash = 0;
-
-       return count;
-}
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
-                        char *buf)
-{
-       struct fb_info *info = dev_get_drvdata(dev);
-       struct auok190xfb_par *par = info->par;
-       int temp;
-
-       temp = auok190x_read_temperature(par);
-       return sprintf(buf, "%d\n", temp);
-}
-
-static DEVICE_ATTR_RW(update_mode);
-static DEVICE_ATTR_RW(flash);
-static DEVICE_ATTR(temp, 0644, temp_show, NULL);
-
-static struct attribute *auok190x_attributes[] = {
-       &dev_attr_update_mode.attr,
-       &dev_attr_flash.attr,
-       &dev_attr_temp.attr,
-       NULL
-};
-
-static const struct attribute_group auok190x_attr_group = {
-       .attrs          = auok190x_attributes,
-};
-
-static int auok190x_power(struct auok190xfb_par *par, bool on)
-{
-       struct auok190x_board *board = par->board;
-       int ret;
-
-       if (on) {
-               /* We should keep POWER up for at least 80ms before setting
-                * RST_N and SLP_N high (TCON spec 20100803_v35 p59)
-                */
-               ret = regulator_enable(par->regulator);
-               if (ret)
-                       return ret;
-
-               msleep(200);
-               gpio_set_value(board->gpio_nrst, 1);
-               gpio_set_value(board->gpio_nsleep, 1);
-               msleep(200);
-       } else {
-               regulator_disable(par->regulator);
-               gpio_set_value(board->gpio_nrst, 0);
-               gpio_set_value(board->gpio_nsleep, 0);
-       }
-
-       return 0;
-}
-
-/*
- * Recovery - powercycle the controller
- */
-
-static void auok190x_recover(struct auok190xfb_par *par)
-{
-       struct device *dev = par->info->device;
-
-       auok190x_power(par, 0);
-       msleep(100);
-       auok190x_power(par, 1);
-
-       /* after powercycling the device, it's always active */
-       pm_runtime_set_active(dev);
-       par->standby = 0;
-
-       par->init(par);
-
-       /* wait for init to complete */
-       par->board->wait_for_rdy(par);
-}
-
-/*
- * Power-management
- */
-static int __maybe_unused auok190x_runtime_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fb_info *info = platform_get_drvdata(pdev);
-       struct auok190xfb_par *par = info->par;
-       struct auok190x_board *board = par->board;
-       u16 standby_param;
-
-       /* take and keep the lock until we are resumed, as the controller
-        * will never reach the non-busy state when in standby mode
-        */
-       mutex_lock(&(par->io_lock));
-
-       if (par->standby) {
-               dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
-               mutex_unlock(&(par->io_lock));
-               return 0;
-       }
-
-       /* according to runtime_pm.txt, runtime_suspend only means that the
-        * device will not process data and will not communicate with the CPU.
-        * As we hold the lock, this stays true even without standby.
-        */
-       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
-               dev_dbg(dev, "runtime suspend without standby\n");
-               goto finish;
-       } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
-               /* for some TCON versions STANDBY expects a parameter (0),
-                * but it is not yet known exactly which versions those are.
-                */
-               dev_dbg(dev, "runtime suspend with additional empty param\n");
-               standby_param = 0;
-               auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
-                                     &standby_param);
-       } else {
-               dev_dbg(dev, "runtime suspend without param\n");
-               auok190x_send_command(par, AUOK190X_CMD_STANDBY);
-       }
-
-       msleep(64);
-
-finish:
-       par->standby = 1;
-
-       return 0;
-}
-
-static int __maybe_unused auok190x_runtime_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fb_info *info = platform_get_drvdata(pdev);
-       struct auok190xfb_par *par = info->par;
-       struct auok190x_board *board = par->board;
-
-       if (!par->standby) {
-               dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
-               return 0;
-       }
-
-       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
-               dev_dbg(dev, "runtime resume without standby\n");
-       } else {
-               /* when in standby, controller is always busy
-                * and only accepts the wakeup command
-                */
-               dev_dbg(dev, "runtime resume from standby\n");
-               auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
-
-               msleep(160);
-
-               /* wait for the controller to be ready and release the lock */
-               board->wait_for_rdy(par);
-       }
-
-       par->standby = 0;
-
-       mutex_unlock(&(par->io_lock));
-
-       return 0;
-}
-
-static int __maybe_unused auok190x_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fb_info *info = platform_get_drvdata(pdev);
-       struct auok190xfb_par *par = info->par;
-       struct auok190x_board *board = par->board;
-       int ret;
-
-       dev_dbg(dev, "suspend\n");
-       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
-               /* suspend via powering off the ic */
-               dev_dbg(dev, "suspend with broken standby\n");
-
-               auok190x_power(par, 0);
-       } else {
-               dev_dbg(dev, "suspend using sleep\n");
-
-               /* the sleep state can only be entered from the standby state.
-                * pm_runtime_get_noresume gets called before the suspend call.
-                * So the device's usage count is >0 but it is not necessarily
-                * active.
-                */
-               if (!pm_runtime_status_suspended(dev)) {
-                       ret = auok190x_runtime_suspend(dev);
-                       if (ret < 0) {
-                               dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
-                                       ret);
-                               return ret;
-                       }
-                       par->manual_standby = 1;
-               }
-
-               gpio_direction_output(board->gpio_nsleep, 0);
-       }
-
-       msleep(100);
-
-       return 0;
-}
-
-static int __maybe_unused auok190x_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fb_info *info = platform_get_drvdata(pdev);
-       struct auok190xfb_par *par = info->par;
-       struct auok190x_board *board = par->board;
-
-       dev_dbg(dev, "resume\n");
-       if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
-               dev_dbg(dev, "resume with broken standby\n");
-
-               auok190x_power(par, 1);
-
-               par->init(par);
-       } else {
-               dev_dbg(dev, "resume from sleep\n");
-
-               /* the device was in runtime suspend when we got suspended,
-                * and pm_runtime_put_sync gets called after this function.
-                * So there is no need to touch the standby mode here at all.
-                */
-               gpio_direction_output(board->gpio_nsleep, 1);
-               msleep(100);
-
-               /* an additional init call seems to be necessary after sleep */
-               auok190x_runtime_resume(dev);
-               par->init(par);
-
-               /* if we were runtime-suspended before, suspend again */
-               if (!par->manual_standby)
-                       auok190x_runtime_suspend(dev);
-               else
-                       par->manual_standby = 0;
-       }
-
-       return 0;
-}
-
-const struct dev_pm_ops auok190x_pm = {
-       SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
-                          NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
-};
-EXPORT_SYMBOL_GPL(auok190x_pm);
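
These shared dev_pm_ops are meant to be referenced by the individual K1900/K1901 platform drivers. A minimal sketch of the hookup (driver and callback names are placeholders):

	static struct platform_driver auok1900fb_driver = {
		.probe	= auok1900fb_probe,	/* hypothetical */
		.remove	= auok1900fb_remove,	/* hypothetical */
		.driver	= {
			.name	= "auo_k1900fb",
			.pm	= &auok190x_pm,
		},
	};
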
-
-/*
- * Common probe and remove code
- */
-
-int auok190x_common_probe(struct platform_device *pdev,
-                         struct auok190x_init_data *init)
-{
-       struct auok190x_board *board = init->board;
-       struct auok190xfb_par *par;
-       struct fb_info *info;
-       struct panel_info *panel;
-       int videomemorysize, ret;
-       unsigned char *videomemory;
-
-       /* check board contents */
-       if (!board->init || !board->cleanup || !board->wait_for_rdy
-           || !board->set_ctl || !board->set_hdb || !board->get_hdb
-           || !board->setup_irq)
-               return -EINVAL;
-
-       info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
-       if (!info)
-               return -ENOMEM;
-
-       par = info->par;
-       par->info = info;
-       par->board = board;
-       par->recover = auok190x_recover;
-       par->update_partial = init->update_partial;
-       par->update_all = init->update_all;
-       par->need_refresh = init->need_refresh;
-       par->init = init->init;
-
-       /* init update modes */
-       par->update_cnt = 0;
-       par->update_mode = -1;
-       par->last_mode = -1;
-       par->flash = 0;
-
-       par->regulator = regulator_get(info->device, "vdd");
-       if (IS_ERR(par->regulator)) {
-               ret = PTR_ERR(par->regulator);
-               dev_err(info->device, "Failed to get regulator: %d\n", ret);
-               goto err_reg;
-       }
-
-       ret = board->init(par);
-       if (ret) {
-               dev_err(info->device, "board init failed, %d\n", ret);
-               goto err_board;
-       }
-
-       ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
-       if (ret) {
-               dev_err(info->device, "could not request sleep gpio, %d\n",
-                       ret);
-               goto err_gpio1;
-       }
-
-       ret = gpio_direction_output(board->gpio_nsleep, 0);
-       if (ret) {
-               dev_err(info->device, "could not set sleep gpio, %d\n", ret);
-               goto err_gpio2;
-       }
-
-       ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
-       if (ret) {
-               dev_err(info->device, "could not request reset gpio, %d\n",
-                       ret);
-               goto err_gpio2;
-       }
-
-       ret = gpio_direction_output(board->gpio_nrst, 0);
-       if (ret) {
-               dev_err(info->device, "could not set reset gpio, %d\n", ret);
-               goto err_gpio3;
-       }
-
-       ret = auok190x_power(par, 1);
-       if (ret) {
-               dev_err(info->device, "could not power on the device, %d\n",
-                       ret);
-               goto err_gpio3;
-       }
-
-       mutex_init(&par->io_lock);
-
-       init_waitqueue_head(&par->waitq);
-
-       ret = par->board->setup_irq(par->info);
-       if (ret) {
-               dev_err(info->device, "could not setup ready-irq, %d\n", ret);
-               goto err_irq;
-       }
-
-       /* wait for init to complete */
-       par->board->wait_for_rdy(par);
-
-       /*
-        * From here on the controller can talk to us
-        */
-
-       /* initialise fix, var, resolution and rotation */
-
-       strlcpy(info->fix.id, init->id, 16);
-       info->var.bits_per_pixel = 8;
-       info->var.grayscale = 1;
-
-       panel = &panel_table[board->resolution];
-
-       par->resolution = board->resolution;
-       par->rotation = 0;
-
-       /* videomemory handling */
-
-       videomemorysize = roundup((panel->w * panel->h) * 2, PAGE_SIZE);
-       videomemory = vzalloc(videomemorysize);
-       if (!videomemory) {
-               ret = -ENOMEM;
-               goto err_irq;
-       }
-
-       info->screen_base = (char *)videomemory;
-       info->fix.smem_len = videomemorysize;
-
-       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
-       info->fbops = &auok190xfb_ops;
-
-       ret = auok190xfb_check_var(&info->var, info);
-       if (ret)
-               goto err_defio;
-
-       auok190xfb_set_fix(info);
-
-       /* deferred io init */
-
-       info->fbdefio = devm_kzalloc(info->device,
-                                    sizeof(struct fb_deferred_io),
-                                    GFP_KERNEL);
-       if (!info->fbdefio) {
-               dev_err(info->device, "Failed to allocate memory\n");
-               ret = -ENOMEM;
-               goto err_defio;
-       }
-
-       dev_dbg(info->device, "targeting %d frames per second\n", board->fps);
-       info->fbdefio->delay = HZ / board->fps;
-       info->fbdefio->first_io = auok190xfb_dpy_first_io,
-       info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io,
-       fb_deferred_io_init(info);
-
-       /* color map */
-
-       ret = fb_alloc_cmap(&info->cmap, 256, 0);
-       if (ret < 0) {
-               dev_err(info->device, "Failed to allocate colormap\n");
-               goto err_cmap;
-       }
-
-       /* controller init */
-
-       par->consecutive_threshold = 100;
-       par->init(par);
-       auok190x_identify(par);
-
-       platform_set_drvdata(pdev, info);
-
-       ret = register_framebuffer(info);
-       if (ret < 0)
-               goto err_regfb;
-
-       ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
-       if (ret)
-               goto err_sysfs;
-
-       dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
-                info->node, info->var.xres, info->var.yres,
-                videomemorysize >> 10);
-
-       /* increase autosuspend_delay when we use alternative methods
-        * for runtime_pm
-        */
-       par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
-                                       ? 1000 : 200;
-
-       pm_runtime_set_active(info->device);
-       pm_runtime_enable(info->device);
-       pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
-       pm_runtime_use_autosuspend(info->device);
-
-       return 0;
-
-err_sysfs:
-       unregister_framebuffer(info);
-err_regfb:
-       fb_dealloc_cmap(&info->cmap);
-err_cmap:
-       fb_deferred_io_cleanup(info);
-err_defio:
-       vfree((void *)info->screen_base);
-err_irq:
-       auok190x_power(par, 0);
-err_gpio3:
-       gpio_free(board->gpio_nrst);
-err_gpio2:
-       gpio_free(board->gpio_nsleep);
-err_gpio1:
-       board->cleanup(par);
-err_board:
-       regulator_put(par->regulator);
-err_reg:
-       framebuffer_release(info);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(auok190x_common_probe);
-
-int auok190x_common_remove(struct platform_device *pdev)
-{
-       struct fb_info *info = platform_get_drvdata(pdev);
-       struct auok190xfb_par *par = info->par;
-       struct auok190x_board *board = par->board;
-
-       pm_runtime_disable(info->device);
-
-       sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
-
-       unregister_framebuffer(info);
-
-       fb_dealloc_cmap(&info->cmap);
-
-       fb_deferred_io_cleanup(info);
-
-       vfree((void *)info->screen_base);
-
-       auok190x_power(par, 0);
-
-       gpio_free(board->gpio_nrst);
-       gpio_free(board->gpio_nsleep);
-
-       board->cleanup(par);
-
-       regulator_put(par->regulator);
-
-       framebuffer_release(info);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(auok190x_common_remove);
-
-MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/auo_k190x.h b/drivers/video/fbdev/auo_k190x.h
deleted file mode 100644 (file)
index e35af1f..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Private common definitions for AUO-K190X framebuffer drivers
- *
- * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * I80 interface specific defines
- */
-
-#define AUOK190X_I80_CS                        0x01
-#define AUOK190X_I80_DC                        0x02
-#define AUOK190X_I80_WR                        0x03
-#define AUOK190X_I80_OE                        0x04
-
-/*
- * AUOK190x commands, common to both controllers
- */
-
-#define AUOK190X_CMD_INIT              0x0000
-#define AUOK190X_CMD_STANDBY           0x0001
-#define AUOK190X_CMD_WAKEUP            0x0002
-#define AUOK190X_CMD_TCON_RESET                0x0003
-#define AUOK190X_CMD_DATA_STOP         0x1002
-#define AUOK190X_CMD_LUT_START         0x1003
-#define AUOK190X_CMD_DISP_REFRESH      0x1004
-#define AUOK190X_CMD_DISP_RESET                0x1005
-#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
-#define AUOK190X_CMD_PRE_DISPLAY_STOP  0x100F
-#define AUOK190X_CMD_FLASH_W           0x2000
-#define AUOK190X_CMD_FLASH_E           0x2001
-#define AUOK190X_CMD_FLASH_STS         0x2002
-#define AUOK190X_CMD_FRAMERATE         0x3000
-#define AUOK190X_CMD_READ_VERSION      0x4000
-#define AUOK190X_CMD_READ_STATUS       0x4001
-#define AUOK190X_CMD_READ_LUT          0x4003
-#define AUOK190X_CMD_DRIVERTIMING      0x5000
-#define AUOK190X_CMD_LBALANCE          0x5001
-#define AUOK190X_CMD_AGINGMODE         0x6000
-#define AUOK190X_CMD_AGINGEXIT         0x6001
-
-/*
- * Common settings for AUOK190X_CMD_INIT
- */
-
-#define AUOK190X_INIT_DATA_FILTER      (0 << 12)
-#define AUOK190X_INIT_DATA_BYPASS      (1 << 12)
-#define AUOK190X_INIT_INVERSE_WHITE    (0 << 9)
-#define AUOK190X_INIT_INVERSE_BLACK    (1 << 9)
-#define AUOK190X_INIT_SCAN_DOWN                (0 << 1)
-#define AUOK190X_INIT_SCAN_UP          (1 << 1)
-#define AUOK190X_INIT_SHIFT_LEFT       (0 << 0)
-#define AUOK190X_INIT_SHIFT_RIGHT      (1 << 0)
-
-/* Common bits to pixels
- *   Mode      15-12   11-8    7-4     3-0
- *   format0   4       3       2       1
- *   format1   3       4       1       2
- */
-
-#define AUOK190X_INIT_FORMAT0          0
-#define AUOK190X_INIT_FORMAT1          (1 << 6)
-
-/*
- * settings for AUOK190X_CMD_RESET
- */
-
-#define AUOK190X_RESET_TCON            (0 << 0)
-#define AUOK190X_RESET_NORMAL          (1 << 0)
-#define AUOK190X_RESET_PON             (1 << 1)
-
-/*
- * AUOK190X_CMD_VERSION
- */
-
-#define AUOK190X_VERSION_TEMP_MASK             (0x1ff)
-#define AUOK190X_VERSION_EPD_MASK              (0xff)
-#define AUOK190X_VERSION_SIZE_INT(_val)                ((_val & 0xfc00) >> 10)
-#define AUOK190X_VERSION_SIZE_FLOAT(_val)      ((_val & 0x3c0) >> 6)
-#define AUOK190X_VERSION_MODEL(_val)           (_val & 0x3f)
-#define AUOK190X_VERSION_LUT(_val)             (_val & 0xff)
-#define AUOK190X_VERSION_TCON(_val)            ((_val & 0xff00) >> 8)
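
For reference, a worked decode of these version fields, using an illustrative raw word data[2] = 0x1a6c:

	/* AUOK190X_VERSION_SIZE_INT(0x1a6c)   -> 6
	 * AUOK190X_VERSION_SIZE_FLOAT(0x1a6c) -> 9   (a 6.9" panel)
	 * AUOK190X_VERSION_MODEL(0x1a6c)      -> 0x2c
	 */
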
-
-/*
- * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
- */
-
-#define AUOK190X_UPDATE_MODE(_res)             ((_res & 0x7) << 12)
-#define AUOK190X_UPDATE_NONFLASH               (1 << 15)
-
-/*
- * track panel specific parameters for common init
- */
-
-struct auok190x_init_data {
-       char *id;
-       struct auok190x_board *board;
-
-       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
-       void (*update_all)(struct auok190xfb_par *par);
-       bool (*need_refresh)(struct auok190xfb_par *par);
-       void (*init)(struct auok190xfb_par *par);
-};
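
A controller driver would fill this structure with its callbacks and hand it to the common probe code. A minimal sketch, with placeholder function names:

	static int auok1900fb_probe(struct platform_device *pdev)
	{
		struct auok190x_init_data init = {
			.id		= "auo_k1900fb",
			.board		= dev_get_platdata(&pdev->dev),
			.update_partial	= auok1900fb_update_partial,	/* hypothetical */
			.update_all	= auok1900fb_update_all,	/* hypothetical */
			.need_refresh	= auok1900fb_need_refresh,	/* hypothetical */
			.init		= auok1900_init,		/* hypothetical */
		};

		return auok190x_common_probe(pdev, &init);
	}
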
-
-
-extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
-extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
-extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
-                                        int argc, u16 *argv);
-extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
-                                 int argc, u16 *argv);
-extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
-                                               u16 cmd, int argc, u16 *argv,
-                                               int size, u16 *data);
-extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
-                                       int argc, u16 *argv, int size,
-                                       u16 *data);
-extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
-                                 int argc, u16 *argv);
-
-extern int auok190x_common_probe(struct platform_device *pdev,
-                                struct auok190x_init_data *init);
-extern int auok190x_common_remove(struct platform_device *pdev);
-
-extern const struct dev_pm_ops auok190x_pm;
index 487d5e3..82c20c6 100644 (file)
@@ -37,7 +37,7 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
 }
 
 /* this is to find and return the vmalloc-ed fb pages */
-static int fb_deferred_io_fault(struct vm_fault *vmf)
+static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 {
        unsigned long offset;
        struct page *page;
@@ -90,7 +90,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
 /* vm_ops->page_mkwrite handler */
-static int fb_deferred_io_mkwrite(struct vm_fault *vmf)
+static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
        struct fb_info *info = vmf->vma->vm_private_data;
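
vm_fault_t is a distinct bitwise type for the VM_FAULT_* status codes, so sparse can now catch handlers that accidentally return errno-style integers. A sketch of the convention, with a hypothetical lookup helper:

	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		struct page *page = example_lookup(vmf);	/* hypothetical */

		if (!page)
			return VM_FAULT_SIGBUS;	/* a VM_FAULT_* code, not -EFAULT */

		get_page(page);
		vmf->page = page;
		return 0;
	}
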
index f27697e..ee212be 100644 (file)
@@ -495,10 +495,9 @@ static int modes_setup(struct mmpfb_info *fbi)
        /* put videomode list to info structure */
        videomodes = kcalloc(videomode_num, sizeof(struct fb_videomode),
                             GFP_KERNEL);
-       if (!videomodes) {
-               dev_err(fbi->dev, "can't malloc video modes\n");
+       if (!videomodes)
                return -ENOMEM;
-       }
+
        for (i = 0; i < videomode_num; i++)
                mmpmode_to_fbmode(&videomodes[i], &mmp_modes[i]);
        fb_videomode_to_modelist(videomodes, videomode_num, &info->modelist);
index b6f83d5..fcdbb2d 100644 (file)
@@ -406,12 +406,10 @@ static int path_init(struct mmphw_path_plat *path_plat,
        dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
 
        /* init driver data */
-       path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
-       if (!path_info) {
-               dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
-                               __func__, config->name);
+       path_info = kzalloc(sizeof(*path_info), GFP_KERNEL);
+       if (!path_info)
                return 0;
-       }
+
        path_info->name = config->name;
        path_info->id = path_plat->id;
        path_info->dev = ctrl->dev;
index 2e50120..fbeeed5 100644 (file)
@@ -1548,7 +1548,7 @@ MODULE_PARM_DESC(noaccel,
                 "(default=0)");
 module_param(noscale, int, 0);
 MODULE_PARM_DESC(noscale,
-                "Disables screen scaleing. (0 or 1=disable) "
+                "Disables screen scaling. (0 or 1=disable) "
                 "(default=0, do scaling)");
 module_param(paneltweak, int, 0);
 MODULE_PARM_DESC(paneltweak,
index a4ee947..e8c748a 100644 (file)
@@ -197,3 +197,7 @@ static struct platform_driver ams_delta_panel_driver = {
 };
 
 module_platform_driver(ams_delta_panel_driver);
+
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("LCD panel support for the Amstrad E3 (Delta) videophone");
+MODULE_LICENSE("GPL");
index 796f463..fd0ac99 100644 (file)
@@ -89,3 +89,7 @@ static struct platform_driver h3_panel_driver = {
 };
 
 module_platform_driver(h3_panel_driver);
+
+MODULE_AUTHOR("Imre Deak");
+MODULE_DESCRIPTION("LCD panel support for the TI OMAP H3 board");
+MODULE_LICENSE("GPL");
index 9d692f5..db4ff1c 100644 (file)
@@ -66,3 +66,7 @@ static struct platform_driver htcherald_panel_driver = {
 };
 
 module_platform_driver(htcherald_panel_driver);
+
+MODULE_AUTHOR("Cory Maccarrone");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LCD panel support for the HTC Herald");
index b284050..1ea775f 100644 (file)
@@ -73,3 +73,7 @@ static struct platform_driver innovator1510_panel_driver = {
 };
 
 module_platform_driver(innovator1510_panel_driver);
+
+MODULE_AUTHOR("Imre Deak");
+MODULE_DESCRIPTION("LCD panel support for the TI OMAP1510 Innovator board");
+MODULE_LICENSE("GPL");
index 1841710..8d0cf68 100644 (file)
@@ -106,3 +106,7 @@ static struct platform_driver innovator1610_panel_driver = {
 };
 
 module_platform_driver(innovator1610_panel_driver);
+
+MODULE_AUTHOR("Imre Deak");
+MODULE_DESCRIPTION("LCD panel support for the TI OMAP1610 Innovator board");
+MODULE_LICENSE("GPL");
index b0be577..9fc43a1 100644 (file)
@@ -93,3 +93,7 @@ static struct platform_driver osk_panel_driver = {
 };
 
 module_platform_driver(osk_panel_driver);
+
+MODULE_AUTHOR("Imre Deak");
+MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board");
+MODULE_LICENSE("GPL");
index cef9638..a0e8886 100644 (file)
@@ -59,3 +59,7 @@ static struct platform_driver palmte_panel_driver = {
 };
 
 module_platform_driver(palmte_panel_driver);
+
+MODULE_AUTHOR("Romain Goyet <r.goyet@gmail.com>, Laurent Gonzalez <palmte.linux@free.fr>");
+MODULE_DESCRIPTION("LCD panel support for the Palm Tungsten E");
+MODULE_LICENSE("GPL");
index 627f13d..2c45375 100644 (file)
@@ -72,3 +72,7 @@ static struct platform_driver palmtt_panel_driver = {
 };
 
 module_platform_driver(palmtt_panel_driver);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("LCD panel support for Palm Tungsten|T");
+MODULE_LICENSE("GPL");
index c46d4db..c99a15a 100644 (file)
@@ -66,3 +66,7 @@ static struct platform_driver palmz71_panel_driver = {
 };
 
 module_platform_driver(palmz71_panel_driver);
+
+MODULE_AUTHOR("Romain Goyet, Laurent Gonzalez, Marek Vasut");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LCD panel support for the Palm Zire71");
index 3479a47..585f39e 100644 (file)
@@ -1645,7 +1645,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
                goto cleanup;
        }
 
-       fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
+       fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
        if (fbdev == NULL) {
                dev_err(&pdev->dev,
                        "unable to allocate memory for device info\n");
index e6226ae..3bf154e 100644 (file)
@@ -5,6 +5,7 @@ menuconfig FB_OMAP2
         tristate "OMAP2+ frame buffer support"
         depends on FB
         depends on DRM_OMAP = n
+       depends on GPIOLIB
 
         select FB_OMAP2_DSS
        select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
index bef4315..87497a0 100644 (file)
@@ -387,8 +387,7 @@ static void dsicm_get_resolution(struct omap_dss_device *dssdev,
 static ssize_t dsicm_num_errors_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        struct omap_dss_device *in = ddata->in;
        u8 errors = 0;
        int r;
@@ -419,8 +418,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
 static ssize_t dsicm_hw_revision_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        struct omap_dss_device *in = ddata->in;
        u8 id1, id2, id3;
        int r;
@@ -451,8 +449,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        struct omap_dss_device *in = ddata->in;
        unsigned long t;
        int r;
@@ -486,8 +483,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        unsigned t;
 
        mutex_lock(&ddata->lock);
@@ -501,8 +497,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
                struct device_attribute *attr,
                const char *buf, size_t count)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        struct omap_dss_device *in = ddata->in;
        unsigned long t;
        int r;
@@ -533,8 +528,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+       struct panel_drv_data *ddata = dev_get_drvdata(dev);
        unsigned t;
 
        mutex_lock(&ddata->lock);
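
platform_set_drvdata() stores its pointer in the platform device's embedded struct device, so the to_platform_device() + platform_get_drvdata() detour removed above is equivalent to a single dev_get_drvdata() call:

	/* the following two expressions yield the same pointer */
	void *a = platform_get_drvdata(to_platform_device(dev));
	void *b = dev_get_drvdata(dev);
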
index c3d49e1..76722a5 100644 (file)
@@ -2115,12 +2115,10 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
        if (ret)
                s = "color-tft";
 
-       for (i = 0; lcd_types[i]; i++)
-               if (!strcmp(s, lcd_types[i]))
-                       break;
-       if (!i || !lcd_types[i]) {
+       i = match_string(lcd_types, -1, s);
+       if (i < 0) {
                dev_err(dev, "lcd-type %s is unknown\n", s);
-               return -EINVAL;
+               return i;
        }
        info->lcd_conn |= LCD_CONN_TYPE(i);
        info->lcd_conn |= LCD_CONN_WIDTH(bus_width);
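
match_string() walks an array of strings (up to the first NULL entry when n is -1) and returns the matching index, or -EINVAL if nothing matches, which is why the error path can return i directly. A usage sketch with an illustrative array:

	static const char * const types[] = { "mono-stn", "color-tft", NULL };
	int i = match_string(types, -1, "color-tft");	/* -> 1 */

	if (i < 0)
		return i;	/* -EINVAL: string not found */
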
index c204683..c09d742 100644 (file)
@@ -1892,11 +1892,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x66, par);
        cr66 = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr66 | 0x02, par);
-       mdelay(10);
+       usleep_range(10000, 11000);
 
        vga_out8(0x3d4, 0x66, par);
        vga_out8(0x3d5, cr66 & ~0x02, par);     /* clear reset flag */
-       mdelay(10);
+       usleep_range(10000, 11000);
 
 
        /*
@@ -1906,11 +1906,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x3f, par);
        cr3f = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr3f | 0x08, par);
-       mdelay(10);
+       usleep_range(10000, 11000);
 
        vga_out8(0x3d4, 0x3f, par);
        vga_out8(0x3d5, cr3f & ~0x08, par);     /* clear reset flags */
-       mdelay(10);
+       usleep_range(10000, 11000);
 
        /* Savage ramdac speeds */
        par->numClocks = 4;
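
mdelay() busy-waits for the full period, while usleep_range() sleeps and gives the timer subsystem a window in which to coalesce wakeups, so it is the better choice for millisecond-scale waits in sleepable context such as this init path:

	/* was: mdelay(10);  -- spins the CPU for the full 10 ms */
	usleep_range(10000, 11000);	/* sleeps 10-11 ms */
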
index c3a4650..dc46be3 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/vmalloc.h>
 
 #include <video/sh_mobile_lcdc.h>
-#include <video/sh_mobile_meram.h>
 
 #include "sh_mobile_lcdcfb.h"
 
@@ -217,7 +216,6 @@ struct sh_mobile_lcdc_priv {
        struct notifier_block notifier;
        int started;
        int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
-       struct sh_mobile_meram_info *meram_dev;
 };
 
 /* -----------------------------------------------------------------------------
@@ -346,16 +344,12 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
                if (priv->dot_clk)
                        clk_prepare_enable(priv->dot_clk);
                pm_runtime_get_sync(priv->dev);
-               if (priv->meram_dev && priv->meram_dev->pdev)
-                       pm_runtime_get_sync(&priv->meram_dev->pdev->dev);
        }
 }
 
 static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
 {
        if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
-               if (priv->meram_dev && priv->meram_dev->pdev)
-                       pm_runtime_put_sync(&priv->meram_dev->pdev->dev);
                pm_runtime_put(priv->dev);
                if (priv->dot_clk)
                        clk_disable_unprepare(priv->dot_clk);
@@ -1073,7 +1067,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 
 static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 {
-       struct sh_mobile_meram_info *mdev = priv->meram_dev;
        struct sh_mobile_lcdc_chan *ch;
        unsigned long tmp;
        int ret;
@@ -1106,9 +1099,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 
        /* Compute frame buffer base address and pitch for each channel. */
        for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
-               int pixelformat;
-               void *cache;
-
                ch = &priv->ch[k];
                if (!ch->enabled)
                        continue;
@@ -1117,45 +1107,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
                ch->base_addr_c = ch->dma_handle
                                + ch->xres_virtual * ch->yres_virtual;
                ch->line_size = ch->pitch;
-
-               /* Enable MERAM if possible. */
-               if (mdev == NULL || ch->cfg->meram_cfg == NULL)
-                       continue;
-
-               /* Free the allocated MERAM cache. */
-               if (ch->cache) {
-                       sh_mobile_meram_cache_free(mdev, ch->cache);
-                       ch->cache = NULL;
-               }
-
-               switch (ch->format->fourcc) {
-               case V4L2_PIX_FMT_NV12:
-               case V4L2_PIX_FMT_NV21:
-               case V4L2_PIX_FMT_NV16:
-               case V4L2_PIX_FMT_NV61:
-                       pixelformat = SH_MOBILE_MERAM_PF_NV;
-                       break;
-               case V4L2_PIX_FMT_NV24:
-               case V4L2_PIX_FMT_NV42:
-                       pixelformat = SH_MOBILE_MERAM_PF_NV24;
-                       break;
-               case V4L2_PIX_FMT_RGB565:
-               case V4L2_PIX_FMT_BGR24:
-               case V4L2_PIX_FMT_BGR32:
-               default:
-                       pixelformat = SH_MOBILE_MERAM_PF_RGB;
-                       break;
-               }
-
-               cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg,
-                                       ch->pitch, ch->yres, pixelformat,
-                                       &ch->line_size);
-               if (!IS_ERR(cache)) {
-                       sh_mobile_meram_cache_update(mdev, cache,
-                                       ch->base_addr_y, ch->base_addr_c,
-                                       &ch->base_addr_y, &ch->base_addr_c);
-                       ch->cache = cache;
-               }
        }
 
        for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) {
@@ -1223,13 +1174,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
                }
 
                sh_mobile_lcdc_display_off(ch);
-
-               /* Free the MERAM cache. */
-               if (ch->cache) {
-                       sh_mobile_meram_cache_free(priv->meram_dev, ch->cache);
-                       ch->cache = NULL;
-               }
-
        }
 
        /* stop the lcdc */
@@ -1851,11 +1795,6 @@ static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var,
        base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual
                    + c_offset;
 
-       if (ch->cache)
-               sh_mobile_meram_cache_update(priv->meram_dev, ch->cache,
-                                            base_addr_y, base_addr_c,
-                                            &base_addr_y, &base_addr_c);
-
        ch->base_addr_y = base_addr_y;
        ch->base_addr_c = base_addr_c;
        ch->pan_y_offset = y_offset;
@@ -2149,10 +2088,8 @@ sh_mobile_lcdc_channel_fb_register(struct sh_mobile_lcdc_chan *ch)
        if (info->fbdefio) {
                ch->sglist = vmalloc(sizeof(struct scatterlist) *
                                     ch->fb_size >> PAGE_SHIFT);
-               if (!ch->sglist) {
-                       dev_err(ch->lcdc->dev, "cannot allocate sglist\n");
+               if (!ch->sglist)
                        return -ENOMEM;
-               }
        }
 
        info->bl_dev = ch->bl;
@@ -2354,8 +2291,7 @@ static int sh_mobile_lcdc_resume(struct device *dev)
 
 static int sh_mobile_lcdc_runtime_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
+       struct sh_mobile_lcdc_priv *priv = dev_get_drvdata(dev);
 
        /* turn off LCDC hardware */
        lcdc_write(priv, _LDCNT1R, 0);
@@ -2365,8 +2301,7 @@ static int sh_mobile_lcdc_runtime_suspend(struct device *dev)
 
 static int sh_mobile_lcdc_runtime_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
+       struct sh_mobile_lcdc_priv *priv = dev_get_drvdata(dev);
 
        __sh_mobile_lcdc_start(priv);
 
@@ -2718,13 +2653,11 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
        }
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               dev_err(&pdev->dev, "cannot allocate device data\n");
+       if (!priv)
                return -ENOMEM;
-       }
 
        priv->dev = &pdev->dev;
-       priv->meram_dev = pdata->meram_dev;
+
        for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
                mutex_init(&priv->ch[i].open_lock);
        platform_set_drvdata(pdev, priv);
index cc52c74..b8e47a8 100644 (file)
@@ -61,7 +61,6 @@ struct sh_mobile_lcdc_chan {
        unsigned long *reg_offs;
        unsigned long ldmt1r_value;
        unsigned long enabled; /* ME and SE in LDCNT2R */
-       void *cache;
 
        struct mutex open_lock;         /* protects the use counter */
        int use_count;
diff --git a/drivers/video/fbdev/sh_mobile_meram.c b/drivers/video/fbdev/sh_mobile_meram.c
deleted file mode 100644 (file)
index baadfb2..0000000
+++ /dev/null
@@ -1,758 +0,0 @@
-/*
- * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver
- *
- * Copyright (c) 2011  Damian Hobson-Garcia <dhobsong@igel.co.jp>
- *                      Takanari Hayama <taki@igel.co.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#include <video/sh_mobile_meram.h>
-
-/* -----------------------------------------------------------------------------
- * MERAM registers
- */
-
-#define MEVCR1                 0x4
-#define MEVCR1_RST             (1 << 31)
-#define MEVCR1_WD              (1 << 30)
-#define MEVCR1_AMD1            (1 << 29)
-#define MEVCR1_AMD0            (1 << 28)
-#define MEQSEL1                        0x40
-#define MEQSEL2                        0x44
-
-#define MExxCTL                        0x400
-#define MExxCTL_BV             (1 << 31)
-#define MExxCTL_BSZ_SHIFT      28
-#define MExxCTL_MSAR_MASK      (0x7ff << MExxCTL_MSAR_SHIFT)
-#define MExxCTL_MSAR_SHIFT     16
-#define MExxCTL_NXT_MASK       (0x1f << MExxCTL_NXT_SHIFT)
-#define MExxCTL_NXT_SHIFT      11
-#define MExxCTL_WD1            (1 << 10)
-#define MExxCTL_WD0            (1 << 9)
-#define MExxCTL_WS             (1 << 8)
-#define MExxCTL_CB             (1 << 7)
-#define MExxCTL_WBF            (1 << 6)
-#define MExxCTL_WF             (1 << 5)
-#define MExxCTL_RF             (1 << 4)
-#define MExxCTL_CM             (1 << 3)
-#define MExxCTL_MD_READ                (1 << 0)
-#define MExxCTL_MD_WRITE       (2 << 0)
-#define MExxCTL_MD_ICB_WB      (3 << 0)
-#define MExxCTL_MD_ICB         (4 << 0)
-#define MExxCTL_MD_FB          (7 << 0)
-#define MExxCTL_MD_MASK                (7 << 0)
-#define MExxBSIZE              0x404
-#define MExxBSIZE_RCNT_SHIFT   28
-#define MExxBSIZE_YSZM1_SHIFT  16
-#define MExxBSIZE_XSZM1_SHIFT  0
-#define MExxMNCF               0x408
-#define MExxMNCF_KWBNM_SHIFT   28
-#define MExxMNCF_KRBNM_SHIFT   24
-#define MExxMNCF_BNM_SHIFT     16
-#define MExxMNCF_XBV           (1 << 15)
-#define MExxMNCF_CPL_YCBCR444  (1 << 12)
-#define MExxMNCF_CPL_YCBCR420  (2 << 12)
-#define MExxMNCF_CPL_YCBCR422  (3 << 12)
-#define MExxMNCF_CPL_MSK       (3 << 12)
-#define MExxMNCF_BL            (1 << 2)
-#define MExxMNCF_LNM_SHIFT     0
-#define MExxSARA               0x410
-#define MExxSARB               0x414
-#define MExxSBSIZE             0x418
-#define MExxSBSIZE_HDV         (1 << 31)
-#define MExxSBSIZE_HSZ16       (0 << 28)
-#define MExxSBSIZE_HSZ32       (1 << 28)
-#define MExxSBSIZE_HSZ64       (2 << 28)
-#define MExxSBSIZE_HSZ128      (3 << 28)
-#define MExxSBSIZE_SBSIZZ_SHIFT        0
-
-#define MERAM_MExxCTL_VAL(next, addr)  \
-       ((((next) << MExxCTL_NXT_SHIFT) & MExxCTL_NXT_MASK) | \
-        (((addr) << MExxCTL_MSAR_SHIFT) & MExxCTL_MSAR_MASK))
-#define        MERAM_MExxBSIZE_VAL(rcnt, yszm1, xszm1) \
-       (((rcnt) << MExxBSIZE_RCNT_SHIFT) | \
-        ((yszm1) << MExxBSIZE_YSZM1_SHIFT) | \
-        ((xszm1) << MExxBSIZE_XSZM1_SHIFT))
-
-static const unsigned long common_regs[] = {
-       MEVCR1,
-       MEQSEL1,
-       MEQSEL2,
-};
-#define MERAM_REGS_SIZE ARRAY_SIZE(common_regs)
-
-static const unsigned long icb_regs[] = {
-       MExxCTL,
-       MExxBSIZE,
-       MExxMNCF,
-       MExxSARA,
-       MExxSARB,
-       MExxSBSIZE,
-};
-#define ICB_REGS_SIZE ARRAY_SIZE(icb_regs)
-
-/*
- * sh_mobile_meram_icb - MERAM ICB information
- * @regs: Registers cache
- * @index: ICB index
- * @offset: MERAM block offset
- * @size: MERAM block size in KiB
- * @cache_unit: Bytes to cache per ICB
- * @pixelformat: Video pixel format of the data stored in the ICB
- * @current_reg: Which of Start Address Register A (0) or B (1) is in use
- */
-struct sh_mobile_meram_icb {
-       unsigned long regs[ICB_REGS_SIZE];
-       unsigned int index;
-       unsigned long offset;
-       unsigned int size;
-
-       unsigned int cache_unit;
-       unsigned int pixelformat;
-       unsigned int current_reg;
-};
-
-#define MERAM_ICB_NUM                  32
-
-struct sh_mobile_meram_fb_plane {
-       struct sh_mobile_meram_icb *marker;
-       struct sh_mobile_meram_icb *cache;
-};
-
-struct sh_mobile_meram_fb_cache {
-       unsigned int nplanes;
-       struct sh_mobile_meram_fb_plane planes[2];
-};
-
-/*
- * sh_mobile_meram_priv - MERAM device
- * @base: Registers base address
- * @meram: MERAM physical address
- * @regs: Registers cache
- * @lock: Protects used_icb and icbs
- * @used_icb: Bitmask of used ICBs
- * @icbs: ICBs
- * @pool: Allocation pool to manage the MERAM
- */
-struct sh_mobile_meram_priv {
-       void __iomem *base;
-       unsigned long meram;
-       unsigned long regs[MERAM_REGS_SIZE];
-
-       struct mutex lock;
-       unsigned long used_icb;
-       struct sh_mobile_meram_icb icbs[MERAM_ICB_NUM];
-
-       struct gen_pool *pool;
-};
-
-/* settings */
-#define MERAM_GRANULARITY              1024
-#define MERAM_SEC_LINE                 15
-#define MERAM_LINE_WIDTH               2048
-
-/* -----------------------------------------------------------------------------
- * Registers access
- */
-
-#define MERAM_ICB_OFFSET(base, idx, off)       ((base) + (off) + (idx) * 0x20)
-
-static inline void meram_write_icb(void __iomem *base, unsigned int idx,
-                                  unsigned int off, unsigned long val)
-{
-       iowrite32(val, MERAM_ICB_OFFSET(base, idx, off));
-}
-
-static inline unsigned long meram_read_icb(void __iomem *base, unsigned int idx,
-                                          unsigned int off)
-{
-       return ioread32(MERAM_ICB_OFFSET(base, idx, off));
-}
-
-static inline void meram_write_reg(void __iomem *base, unsigned int off,
-                                  unsigned long val)
-{
-       iowrite32(val, base + off);
-}
-
-static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off)
-{
-       return ioread32(base + off);
-}
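
Each ICB owns a 0x20-byte register window, so MERAM_ICB_OFFSET() simply strides by the ICB index. For example:

	/* MExxCTL of ICB 5:
	 * MERAM_ICB_OFFSET(base, 5, MExxCTL) == base + 0x400 + 5 * 0x20
	 *                                    == base + 0x4a0
	 */
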
-
-/* -----------------------------------------------------------------------------
- * MERAM allocation and free
- */
-
-static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size)
-{
-       return gen_pool_alloc(priv->pool, size);
-}
-
-static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem,
-                      size_t size)
-{
-       gen_pool_free(priv->pool, mem, size);
-}
-
-/* -----------------------------------------------------------------------------
- * LCDC cache planes allocation, init, cleanup and free
- */
-
-/* Allocate ICBs and MERAM for a plane. */
-static int meram_plane_alloc(struct sh_mobile_meram_priv *priv,
-                            struct sh_mobile_meram_fb_plane *plane,
-                            size_t size)
-{
-       unsigned long mem;
-       unsigned long idx;
-
-       idx = find_first_zero_bit(&priv->used_icb, 28);
-       if (idx == 28)
-               return -ENOMEM;
-       plane->cache = &priv->icbs[idx];
-
-       idx = find_next_zero_bit(&priv->used_icb, 32, 28);
-       if (idx == 32)
-               return -ENOMEM;
-       plane->marker = &priv->icbs[idx];
-
-       mem = meram_alloc(priv, size * 1024);
-       if (mem == 0)
-               return -ENOMEM;
-
-       __set_bit(plane->marker->index, &priv->used_icb);
-       __set_bit(plane->cache->index, &priv->used_icb);
-
-       plane->marker->offset = mem - priv->meram;
-       plane->marker->size = size;
-
-       return 0;
-}
-
-/* Free ICBs and MERAM for a plane. */
-static void meram_plane_free(struct sh_mobile_meram_priv *priv,
-                            struct sh_mobile_meram_fb_plane *plane)
-{
-       meram_free(priv, priv->meram + plane->marker->offset,
-                  plane->marker->size * 1024);
-
-       __clear_bit(plane->marker->index, &priv->used_icb);
-       __clear_bit(plane->cache->index, &priv->used_icb);
-}
-
-/* Is this a YCbCr (NV12, NV16 or NV24) colorspace? */
-static int is_nvcolor(int cspace)
-{
-       if (cspace == SH_MOBILE_MERAM_PF_NV ||
-           cspace == SH_MOBILE_MERAM_PF_NV24)
-               return 1;
-       return 0;
-}
-
-/* Set the next address to fetch. */
-static void meram_set_next_addr(struct sh_mobile_meram_priv *priv,
-                               struct sh_mobile_meram_fb_cache *cache,
-                               unsigned long base_addr_y,
-                               unsigned long base_addr_c)
-{
-       struct sh_mobile_meram_icb *icb = cache->planes[0].marker;
-       unsigned long target;
-
-       icb->current_reg ^= 1;
-       target = icb->current_reg ? MExxSARB : MExxSARA;
-
-       /* set the next address to fetch */
-       meram_write_icb(priv->base, cache->planes[0].cache->index, target,
-                       base_addr_y);
-       meram_write_icb(priv->base, cache->planes[0].marker->index, target,
-                       base_addr_y + cache->planes[0].marker->cache_unit);
-
-       if (cache->nplanes == 2) {
-               meram_write_icb(priv->base, cache->planes[1].cache->index,
-                               target, base_addr_c);
-               meram_write_icb(priv->base, cache->planes[1].marker->index,
-                               target, base_addr_c +
-                               cache->planes[1].marker->cache_unit);
-       }
-}
-
-/* Get the next ICB address. */
-static void
-meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
-                       struct sh_mobile_meram_fb_cache *cache,
-                       unsigned long *icb_addr_y, unsigned long *icb_addr_c)
-{
-       struct sh_mobile_meram_icb *icb = cache->planes[0].marker;
-       unsigned long icb_offset;
-
-       if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0)
-               icb_offset = 0x80000000 | (icb->current_reg << 29);
-       else
-               icb_offset = 0xc0000000 | (icb->current_reg << 23);
-
-       *icb_addr_y = icb_offset | (cache->planes[0].marker->index << 24);
-       if (cache->nplanes == 2)
-               *icb_addr_c = icb_offset
-                           | (cache->planes[1].marker->index << 24);
-}
-
-#define MERAM_CALC_BYTECOUNT(x, y) \
-       (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1))
-
-/* Initialize MERAM. */
-static int meram_plane_init(struct sh_mobile_meram_priv *priv,
-                           struct sh_mobile_meram_fb_plane *plane,
-                           unsigned int xres, unsigned int yres,
-                           unsigned int *out_pitch)
-{
-       struct sh_mobile_meram_icb *marker = plane->marker;
-       unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres);
-       unsigned long bnm;
-       unsigned int lcdc_pitch;
-       unsigned int xpitch;
-       unsigned int line_cnt;
-       unsigned int save_lines;
-
-       /* adjust pitch to 1024, 2048, 4096 or 8192 */
-       lcdc_pitch = (xres - 1) | 1023;
-       lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1);
-       lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2);
-       lcdc_pitch += 1;
-
-       /* derive settings */
-       if (lcdc_pitch == 8192 && yres >= 1024) {
-               lcdc_pitch = xpitch = MERAM_LINE_WIDTH;
-               line_cnt = total_byte_count >> 11;
-               *out_pitch = xres;
-               save_lines = plane->marker->size / 16 / MERAM_SEC_LINE;
-               save_lines *= MERAM_SEC_LINE;
-       } else {
-               xpitch = xres;
-               line_cnt = yres;
-               *out_pitch = lcdc_pitch;
-               save_lines = plane->marker->size / (lcdc_pitch >> 10) / 2;
-               save_lines &= 0xff;
-       }
-       bnm = (save_lines - 1) << 16;
-
-       /* TODO: we should check whether the MERAM buffer is large enough */
-
-       /* set up ICB */
-       meram_write_icb(priv->base, plane->cache->index,  MExxBSIZE,
-                       MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1));
-       meram_write_icb(priv->base, plane->marker->index, MExxBSIZE,
-                       MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1));
-
-       meram_write_icb(priv->base, plane->cache->index,  MExxMNCF, bnm);
-       meram_write_icb(priv->base, plane->marker->index, MExxMNCF, bnm);
-
-       meram_write_icb(priv->base, plane->cache->index,  MExxSBSIZE, xpitch);
-       meram_write_icb(priv->base, plane->marker->index, MExxSBSIZE, xpitch);
-
-       /* save a cache unit size */
-       plane->cache->cache_unit = xres * save_lines;
-       plane->marker->cache_unit = xres * save_lines;
-
-       /*
-        * Set up MERAM for the framebuffer.
-        *
-        * We chain the cache ICB and the marker ICB, and split the
-        * allocated MERAM buffer between the two ICBs.
-        */
-       meram_write_icb(priv->base, plane->cache->index, MExxCTL,
-                       MERAM_MExxCTL_VAL(plane->marker->index, marker->offset)
-                       | MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM |
-                       MExxCTL_MD_FB);
-       meram_write_icb(priv->base, plane->marker->index, MExxCTL,
-                       MERAM_MExxCTL_VAL(plane->cache->index, marker->offset +
-                                         plane->marker->size / 2) |
-                       MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM |
-                       MExxCTL_MD_FB);
-
-       return 0;
-}
-
-static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv,
-                               struct sh_mobile_meram_fb_plane *plane)
-{
-       /* disable ICB */
-       meram_write_icb(priv->base, plane->cache->index,  MExxCTL,
-                       MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF);
-       meram_write_icb(priv->base, plane->marker->index, MExxCTL,
-                       MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF);
-
-       plane->cache->cache_unit = 0;
-       plane->marker->cache_unit = 0;
-}
-
-/* -----------------------------------------------------------------------------
- * MERAM operations
- */
-
-unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata,
-                                   size_t size)
-{
-       struct sh_mobile_meram_priv *priv = pdata->priv;
-
-       return meram_alloc(priv, size);
-}
-EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc);
-
-void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem,
-                         size_t size)
-{
-       struct sh_mobile_meram_priv *priv = pdata->priv;
-
-       meram_free(priv, mem, size);
-}
-EXPORT_SYMBOL_GPL(sh_mobile_meram_free);
-
-/* Allocate memory for the ICBs and mark them as used. */
-static struct sh_mobile_meram_fb_cache *
-meram_cache_alloc(struct sh_mobile_meram_priv *priv,
-                 const struct sh_mobile_meram_cfg *cfg,
-                 int pixelformat)
-{
-       unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
-       struct sh_mobile_meram_fb_cache *cache;
-       int ret;
-
-       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-       if (cache == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       cache->nplanes = nplanes;
-
-       ret = meram_plane_alloc(priv, &cache->planes[0],
-                               cfg->icb[0].meram_size);
-       if (ret < 0)
-               goto error;
-
-       cache->planes[0].marker->current_reg = 1;
-       cache->planes[0].marker->pixelformat = pixelformat;
-
-       if (cache->nplanes == 1)
-               return cache;
-
-       ret = meram_plane_alloc(priv, &cache->planes[1],
-                               cfg->icb[1].meram_size);
-       if (ret < 0) {
-               meram_plane_free(priv, &cache->planes[0]);
-               goto error;
-       }
-
-       return cache;
-
-error:
-       kfree(cache);
-       return ERR_PTR(-ENOMEM);
-}
-
-void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata,
-                                 const struct sh_mobile_meram_cfg *cfg,
-                                 unsigned int xres, unsigned int yres,
-                                 unsigned int pixelformat, unsigned int *pitch)
-{
-       struct sh_mobile_meram_fb_cache *cache;
-       struct sh_mobile_meram_priv *priv = pdata->priv;
-       struct platform_device *pdev = pdata->pdev;
-       unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
-       unsigned int out_pitch;
-
-       if (priv == NULL)
-               return ERR_PTR(-ENODEV);
-
-       if (pixelformat != SH_MOBILE_MERAM_PF_NV &&
-           pixelformat != SH_MOBILE_MERAM_PF_NV24 &&
-           pixelformat != SH_MOBILE_MERAM_PF_RGB)
-               return ERR_PTR(-EINVAL);
-
-       dev_dbg(&pdev->dev, "registering %dx%d (%s)", xres, yres,
-               !pixelformat ? "yuv" : "rgb");
-
-       /* we can't handle wider than 8192px */
-       if (xres > 8192) {
-               dev_err(&pdev->dev, "width exceeding the limit (> 8192).");
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (cfg->icb[0].meram_size == 0)
-               return ERR_PTR(-EINVAL);
-
-       if (nplanes == 2 && cfg->icb[1].meram_size == 0)
-               return ERR_PTR(-EINVAL);
-
-       mutex_lock(&priv->lock);
-
-       /* We now register the ICBs and allocate the MERAM regions. */
-       cache = meram_cache_alloc(priv, cfg, pixelformat);
-       if (IS_ERR(cache)) {
-               dev_err(&pdev->dev, "MERAM allocation failed (%ld).",
-                       PTR_ERR(cache));
-               goto err;
-       }
-
-       /* initialize MERAM */
-       meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch);
-       *pitch = out_pitch;
-       if (pixelformat == SH_MOBILE_MERAM_PF_NV)
-               meram_plane_init(priv, &cache->planes[1],
-                                xres, (yres + 1) / 2, &out_pitch);
-       else if (pixelformat == SH_MOBILE_MERAM_PF_NV24)
-               meram_plane_init(priv, &cache->planes[1],
-                                2 * xres, (yres + 1) / 2, &out_pitch);
-
-err:
-       mutex_unlock(&priv->lock);
-       return cache;
-}
-EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc);
-
-void
-sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data)
-{
-       struct sh_mobile_meram_fb_cache *cache = data;
-       struct sh_mobile_meram_priv *priv = pdata->priv;
-
-       mutex_lock(&priv->lock);
-
-       /* Cleanup and free. */
-       meram_plane_cleanup(priv, &cache->planes[0]);
-       meram_plane_free(priv, &cache->planes[0]);
-
-       if (cache->nplanes == 2) {
-               meram_plane_cleanup(priv, &cache->planes[1]);
-               meram_plane_free(priv, &cache->planes[1]);
-       }
-
-       kfree(cache);
-
-       mutex_unlock(&priv->lock);
-}
-EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free);
-
-void
-sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data,
-                            unsigned long base_addr_y,
-                            unsigned long base_addr_c,
-                            unsigned long *icb_addr_y,
-                            unsigned long *icb_addr_c)
-{
-       struct sh_mobile_meram_fb_cache *cache = data;
-       struct sh_mobile_meram_priv *priv = pdata->priv;
-
-       mutex_lock(&priv->lock);
-
-       meram_set_next_addr(priv, cache, base_addr_y, base_addr_c);
-       meram_get_next_icb_addr(pdata, cache, icb_addr_y, icb_addr_c);
-
-       mutex_unlock(&priv->lock);
-}
-EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
-
-/* -----------------------------------------------------------------------------
- * Power management
- */
-
-#ifdef CONFIG_PM
-static int sh_mobile_meram_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
-       unsigned int i, j;
-
-       for (i = 0; i < MERAM_REGS_SIZE; i++)
-               priv->regs[i] = meram_read_reg(priv->base, common_regs[i]);
-
-       for (i = 0; i < 32; i++) {
-               if (!test_bit(i, &priv->used_icb))
-                       continue;
-               for (j = 0; j < ICB_REGS_SIZE; j++) {
-                       priv->icbs[i].regs[j] =
-                               meram_read_icb(priv->base, i, icb_regs[j]);
-                       /* Reset ICB on resume */
-                       if (icb_regs[j] == MExxCTL)
-                               priv->icbs[i].regs[j] |=
-                                       MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF;
-               }
-       }
-       return 0;
-}
-
-static int sh_mobile_meram_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
-       unsigned int i, j;
-
-       for (i = 0; i < 32; i++) {
-               if (!test_bit(i, &priv->used_icb))
-                       continue;
-               for (j = 0; j < ICB_REGS_SIZE; j++)
-                       meram_write_icb(priv->base, i, icb_regs[j],
-                                       priv->icbs[i].regs[j]);
-       }
-
-       for (i = 0; i < MERAM_REGS_SIZE; i++)
-               meram_write_reg(priv->base, common_regs[i], priv->regs[i]);
-       return 0;
-}
-#endif /* CONFIG_PM */
-
-static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops,
-                           sh_mobile_meram_suspend,
-                           sh_mobile_meram_resume, NULL);
-
-/* -----------------------------------------------------------------------------
- * Probe/remove and driver init/exit
- */
-
-static int sh_mobile_meram_probe(struct platform_device *pdev)
-{
-       struct sh_mobile_meram_priv *priv;
-       struct sh_mobile_meram_info *pdata = pdev->dev.platform_data;
-       struct resource *regs;
-       struct resource *meram;
-       unsigned int i;
-       int error;
-
-       if (!pdata) {
-               dev_err(&pdev->dev, "no platform data defined\n");
-               return -EINVAL;
-       }
-
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       meram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (regs == NULL || meram == NULL) {
-               dev_err(&pdev->dev, "cannot get platform resources\n");
-               return -ENOENT;
-       }
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               dev_err(&pdev->dev, "cannot allocate device data\n");
-               return -ENOMEM;
-       }
-
-       /* Initialize private data. */
-       mutex_init(&priv->lock);
-       priv->used_icb = pdata->reserved_icbs;
-
-       for (i = 0; i < MERAM_ICB_NUM; ++i)
-               priv->icbs[i].index = i;
-
-       pdata->priv = priv;
-       pdata->pdev = pdev;
-
-       /* Request memory regions and remap the registers. */
-       if (!request_mem_region(regs->start, resource_size(regs), pdev->name)) {
-               dev_err(&pdev->dev, "MERAM registers region already claimed\n");
-               error = -EBUSY;
-               goto err_req_regs;
-       }
-
-       if (!request_mem_region(meram->start, resource_size(meram),
-                               pdev->name)) {
-               dev_err(&pdev->dev, "MERAM memory region already claimed\n");
-               error = -EBUSY;
-               goto err_req_meram;
-       }
-
-       priv->base = ioremap_nocache(regs->start, resource_size(regs));
-       if (!priv->base) {
-               dev_err(&pdev->dev, "ioremap failed\n");
-               error = -EFAULT;
-               goto err_ioremap;
-       }
-
-       priv->meram = meram->start;
-
-       /* Create and initialize the MERAM memory pool. */
-       priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1);
-       if (priv->pool == NULL) {
-               error = -ENOMEM;
-               goto err_genpool;
-       }
-
-       error = gen_pool_add(priv->pool, meram->start, resource_size(meram),
-                            -1);
-       if (error < 0)
-               goto err_genpool;
-
-       /* initialize ICB addressing mode */
-       if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1)
-               meram_write_reg(priv->base, MEVCR1, MEVCR1_AMD1);
-
-       platform_set_drvdata(pdev, priv);
-       pm_runtime_enable(&pdev->dev);
-
-       dev_info(&pdev->dev, "sh_mobile_meram initialized.");
-
-       return 0;
-
-err_genpool:
-       if (priv->pool)
-               gen_pool_destroy(priv->pool);
-       iounmap(priv->base);
-err_ioremap:
-       release_mem_region(meram->start, resource_size(meram));
-err_req_meram:
-       release_mem_region(regs->start, resource_size(regs));
-err_req_regs:
-       mutex_destroy(&priv->lock);
-       kfree(priv);
-
-       return error;
-}
-
-static int sh_mobile_meram_remove(struct platform_device *pdev)
-{
-       struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
-       struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       struct resource *meram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
-       pm_runtime_disable(&pdev->dev);
-
-       gen_pool_destroy(priv->pool);
-
-       iounmap(priv->base);
-       release_mem_region(meram->start, resource_size(meram));
-       release_mem_region(regs->start, resource_size(regs));
-
-       mutex_destroy(&priv->lock);
-
-       kfree(priv);
-
-       return 0;
-}
-
-static struct platform_driver sh_mobile_meram_driver = {
-       .driver = {
-               .name           = "sh_mobile_meram",
-               .pm             = &sh_mobile_meram_dev_pm_ops,
-       },
-       .probe          = sh_mobile_meram_probe,
-       .remove         = sh_mobile_meram_remove,
-};
-
-module_platform_driver(sh_mobile_meram_driver);
-
-MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
-MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
-MODULE_LICENSE("GPL v2");
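
The deleted meram_plane_init() above rounds the LCDC pitch up to the next power of two (1024 through 8192) with a bit-smearing trick rather than a loop: set the low ten bits, smear the top bit downwards, add one. A minimal user-space sketch of the same idiom; round_up_pitch() is an illustrative name, not part of the driver:

#include <stdio.h>

/*
 * Round xres up to 1024, 2048, 4096 or 8192, as meram_plane_init() did.
 * Two smear steps suffice only because the driver rejects widths above
 * 8192, so at most three bits sit above the forced low ten.
 */
static unsigned int round_up_pitch(unsigned int xres)
{
	unsigned int pitch = (xres - 1) | 1023;	/* force the low ten bits */

	pitch |= pitch >> 1;	/* smear the highest set bit downwards */
	pitch |= pitch >> 2;
	return pitch + 1;	/* next power of two */
}

int main(void)
{
	/* prints: 1024 2048 8192 */
	printf("%u %u %u\n", round_up_pitch(640), round_up_pitch(1366),
	       round_up_pitch(4097));
	return 0;
}
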
index 7f4e908..812a36c 100644
@@ -836,7 +836,7 @@ static void xxxfb_remove(struct pci_dev *dev)
  *     @dev: PCI device
  *     @msg: the suspend event code.
  *
- *      See Documentation/power/admin-guide/devices.rst for more information
+ *      See Documentation/driver-api/pm/devices.rst for more information
  */
 static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
 {
@@ -851,7 +851,7 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
  *     xxxfb_resume - Optional but recommended function. Resume the device.
  *     @dev: PCI device
  *
- *      See Documentation/power/admin-guide/devices.rst for more information
+ *      See Documentation/driver-api/pm/devices.rst for more information
  */
 static int xxxfb_resume(struct pci_dev *dev)
 {
@@ -915,7 +915,7 @@ static void __exit xxxfb_exit(void)
  *     @dev: platform device
  *     @msg: the suspend event code.
  *
- *      See Documentation/power/admin-guide/devices.rst for more information
+ *      See Documentation/driver-api/pm/devices.rst for more information
  */
 static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg)
 {
@@ -930,7 +930,7 @@ static int xxxfb_suspend(struct platform_device *dev, pm_message_t msg)
  *     xxxfb_resume - Optional but recommended function. Resume the device.
  *     @dev: platform device
  *
- *      See Documentation/power/admin-guide/devices.rst for more information
+ *      See Documentation/driver-api/pm/devices.rst for more information
  */
 static int xxxfb_resume(struct platform_dev *dev)
 {
index 6f0a195..dde52d0 100644
@@ -1932,8 +1932,7 @@ static int sm501fb_probe(struct platform_device *pdev)
        int ret;
 
        /* allocate our framebuffers */
-
-       info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(dev, "failed to allocate state\n");
                return -ENOMEM;
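
The sm501fb hunk above switches the allocation to the sizeof(*info) form: the size is derived from the pointer being assigned rather than from a repeated type name, so the two cannot drift apart if the type ever changes. A hedged user-space sketch of the idiom; struct fb_state and fb_state_alloc() are invented for illustration:

#include <stdlib.h>

struct fb_state {
	int width;
	int height;
};

/*
 * calloc(1, sizeof(*info)) mirrors kzalloc(sizeof(*info), GFP_KERNEL):
 * the element size follows the pointer's type automatically.
 */
static struct fb_state *fb_state_alloc(void)
{
	struct fb_state *info = calloc(1, sizeof(*info));

	return info;	/* NULL on failure, zeroed on success */
}
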
index 275dbbb..649d2ca 100644
 #include <linux/console.h>
 #include <linux/timer.h>
 
+#ifdef CONFIG_X86
+#include <asm/olpc.h>
+#else
+#define machine_is_olpc(x) 0
+#endif
+
 #include "debug.h"
 
 #include "viafbdev.h"
index 2245090..48969c6 100644
@@ -20,7 +20,6 @@
  */
 
 #include <linux/via-core.h>
-#include <asm/olpc.h>
 #include "global.h"
 #include "via_clock.h"
 
index 77774d8..b041eb2 100644
@@ -17,7 +17,6 @@
 #include <linux/platform_device.h>
 #include <linux/list.h>
 #include <linux/pm.h>
-#include <asm/olpc.h>
 
 /*
  * The default port config.
index bf269fa..3d0efdb 100644
@@ -25,7 +25,7 @@
 
 #include <linux/kernel.h>
 #include <linux/via-core.h>
-#include <asm/olpc.h>
+
 #include "via_clock.h"
 #include "global.h"
 #include "debug.h"
index 52f577b..d2f7850 100644
@@ -25,7 +25,6 @@
 #include <linux/stat.h>
 #include <linux/via-core.h>
 #include <linux/via_i2c.h>
-#include <asm/olpc.h>
 
 #define _MASTER_FILE
 #include "global.h"
index 57a27c4..56df483 100644
@@ -168,7 +168,7 @@ config BINFMT_MISC
          will automatically feed it to the correct interpreter.
 
          You can do other nice things, too. Read the file
-         <file:Documentation/binfmt_misc.txt> to learn how to use this
+         <file:Documentation/admin-guide/binfmt-misc.rst> to learn how to use this
          feature, <file:Documentation/admin-guide/java.rst> for information about how
          to include Java support, and <file:Documentation/admin-guide/mono.rst> for
          information about how to include Mono-based .NET support.
index 16f2dfe..aff7eec 100644
@@ -389,7 +389,7 @@ Version 0.4 (2001-10-28)
        (fs/nls/Config.in)
 
 * Added Configure.help entries for CONFIG_BEFS_FS and CONFIG_DEBUG_BEFS
-       (Documentation/Configure.help)
+       (currently at fs/befs/Kconfig)
 
 2001-08-??
 ==========
index 4de1915..4b5fff3 100644
@@ -4,7 +4,7 @@
  * Copyright (C) 1997 Richard Günther
  *
  * binfmt_misc detects binaries via a magic or filename extension and invokes
- * a specified wrapper. See Documentation/binfmt_misc.txt for more details.
+ * a specified wrapper. See Documentation/admin-guide/binfmt-misc.rst for more details.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 1161460..bfe9995 100644
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        seq_putc(m, '\n');
 }
 
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+       struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+       struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+       seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+       seq_puts(m, "\t\tCapabilities: ");
+       if (iface->rdma_capable)
+               seq_puts(m, "rdma ");
+       if (iface->rss_capable)
+               seq_puts(m, "rss ");
+       seq_putc(m, '\n');
+       if (iface->sockaddr.ss_family == AF_INET)
+               seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+       else if (iface->sockaddr.ss_family == AF_INET6)
+               seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
                                              mid_entry->mid);
                        }
                        spin_unlock(&GlobalMid_Lock);
+
+                       spin_lock(&ses->iface_lock);
+                       if (ses->iface_count)
+                               seq_printf(m, "\n\tServer interfaces: %zu\n",
+                                          ses->iface_count);
+                       for (j = 0; j < ses->iface_count; j++) {
+                               seq_printf(m, "\t%d)\n", j);
+                               cifs_dump_iface(m, &ses->iface_list[j]);
+                       }
+                       spin_unlock(&ses->iface_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
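
The new cifs_dump_iface() keeps one struct sockaddr_storage per interface and selects the IPv4 or IPv6 view from ss_family before printing. A user-space sketch of the same dispatch, substituting inet_ntop() for the kernel's %pI4/%pI6 printk extensions:

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

/* Print an address held in a sockaddr_storage, dispatching on
 * ss_family exactly as cifs_dump_iface() does. */
static void dump_sockaddr(const struct sockaddr_storage *ss)
{
	char buf[INET6_ADDRSTRLEN];

	if (ss->ss_family == AF_INET) {
		const struct sockaddr_in *v4 = (const struct sockaddr_in *)ss;

		printf("IPv4: %s\n",
		       inet_ntop(AF_INET, &v4->sin_addr, buf, sizeof(buf)));
	} else if (ss->ss_family == AF_INET6) {
		const struct sockaddr_in6 *v6 = (const struct sockaddr_in6 *)ss;

		printf("IPv6: %s\n",
		       inet_ntop(AF_INET6, &v6->sin6_addr, buf, sizeof(buf)));
	}
}
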
index 937251c..ee2a8ec 100644
@@ -37,7 +37,6 @@
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
-                       int start,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash)
 {
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
+       int is_smb2 = server->vals->header_preamble_size == 0;
 
-       for (i = start; i < n_vec; i++) {
+       /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+       if (is_smb2) {
+               if (iov[0].iov_len <= 4)
+                       return -EIO;
+               i = 0;
+       } else {
+               if (n_vec < 2 || iov[0].iov_len != 4)
+                       return -EIO;
+               i = 1; /* skip rfc1002 length */
+       }
+
+       for (; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
                        cifs_dbg(VFS, "null iovec entry\n");
                        return -EIO;
                }
-               if (i == 1 && iov[1].iov_len <= 4)
-                       break; /* nothing to sign or corrupt header */
+
                rc = crypto_shash_update(shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
                return rc;
        }
 
-       return __cifs_calc_signature(rqst, 1, server, signature,
+       return __cifs_calc_signature(rqst, server, signature,
                                     &server->secmech.sdescmd5->shash);
 }
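
With the rfc1002 length no longer carried as iov[0] on SMB2+, __cifs_calc_signature() now just picks the starting vector per dialect and feeds every non-empty kvec to crypto_shash_update(). The shape of that loop, sketched in user space; cifs_hash_update() is a stand-in for the crypto call, assumed never to fail here:

#include <errno.h>
#include <stddef.h>

struct kvec {
	void *iov_base;
	size_t iov_len;
};

static void cifs_hash_update(const void *data, size_t len)
{
	(void)data;	/* placeholder for crypto_shash_update() */
	(void)len;
}

/* Hash every non-empty vector; reject NULL entries as the kernel loop
 * does with -EIO.  start is 0 for SMB2+ (no rfc1002 iovec) and 1 for
 * SMB1, mirroring the patched function. */
static int sign_iovecs(const struct kvec *iov, int n_vec, int start)
{
	int i;

	for (i = start; i < n_vec; i++) {
		if (iov[i].iov_len == 0)
			continue;
		if (iov[i].iov_base == NULL)
			return -EIO;
		cifs_hash_update(iov[i].iov_base, iov[i].iov_len);
	}
	return 0;
}
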
 
index 1efa2e6..bd78da5 100644
@@ -33,6 +33,9 @@
 
 #define CIFS_MAGIC_NUMBER 0xFF534D42      /* the first four bytes of SMB PDUs */
 
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -312,6 +315,10 @@ struct smb_version_operations {
        /* send echo request */
        int (*echo)(struct TCP_Server_Info *);
        /* create directory */
+       int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+                       umode_t mode, struct cifs_tcon *tcon,
+                       const char *full_path,
+                       struct cifs_sb_info *cifs_sb);
        int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
                     struct cifs_sb_info *);
        /* set info on created directory */
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 
 #endif
 
+struct cifs_server_iface {
+       size_t speed;
+       unsigned int rdma_capable : 1;
+       unsigned int rss_capable : 1;
+       struct sockaddr_storage sockaddr;
+};
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -875,6 +889,20 @@ struct cifs_ses {
 #ifdef CONFIG_CIFS_SMB311
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 #endif /* 3.1.1 */
+
+       /*
+        * Network interfaces available on the server this session is
+        * connected to.
+        *
+        * Other channels can be opened by connecting and binding this
+        * session to interfaces from this list.
+        *
+        * iface_lock should be taken when accessing any of these fields
+        */
+       spinlock_t iface_lock;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       unsigned long iface_last_update; /* jiffies */
 };
 
 static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
        return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_fid {
+       bool is_valid:1;        /* Do we have a usable root fid */
+       struct cifs_fid *fid;
+       struct mutex fid_mutex;
+       struct cifs_tcon *tcon;
+       struct work_struct lease_break;
+};
+
 /*
  * there is one of these for each connection to a resource on a particular
  * session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
        struct fscache_cookie *fscache; /* cookie for share */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
-       bool valid_root_fid:1;  /* Do we have a useable root fid */
-       struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
-       struct cifs_fid *prfid; /* handle to the directory at top of share */
+       struct cached_fid crfid; /* Cached root fid */
        /* BB add field for back pointer to sb struct(s)? */
 };
 
index 4e0d183..03018be 100644
@@ -112,10 +112,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */, const int flags,
                        struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
-                         struct kvec *pkvec, int nvec_to_send,
-                         int *pbuftype, const int flags,
-                         struct kvec *presp);
 extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
@@ -544,7 +540,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                           struct cifs_sb_info *cifs_sb,
                           const unsigned char *path, char *pbuf,
                           unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +548,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
 
 int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
                    struct sdesc **sdesc);
index 42329b2..d352da3 100644
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        }
        spin_unlock(&tcon->open_file_lock);
 
-       mutex_lock(&tcon->prfid_mutex);
-       tcon->valid_root_fid = false;
-       memset(tcon->prfid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_lock(&tcon->crfid.fid_mutex);
+       tcon->crfid.is_valid = false;
+       memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+       mutex_unlock(&tcon->crfid.fid_mutex);
 
        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
index 96645a7..a57da1b 100644
@@ -57,9 +57,6 @@
 #include "smb2proto.h"
 #include "smbdirect.h"
 
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
 extern mempool_t *cifs_req_poolp;
 extern bool disable_legacy_dialects;
 
@@ -3029,8 +3026,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 
 #ifdef CONFIG_CIFS_SMB311
        if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
-               if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+               if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
                        tcon->posix_extensions = true;
+                       printk_once(KERN_WARNING
+                               "SMB3.11 POSIX Extensions are experimental\n");
+               }
        }
 #endif /* 311 */
 
index f4697f5..a2cfb33 100644
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                goto mkdir_out;
        }
 
+       server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+       if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+               rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+                                             cifs_sb);
+               d_drop(direntry); /* for the time being, always refresh inode info */
+               goto mkdir_out;
+       }
+#endif /* SMB311 */
+
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        goto mkdir_out;
        }
 
-       server = tcon->ses->server;
-
        if (!server->ops->mkdir) {
                rc = -ENOSYS;
                goto mkdir_out;
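
cifs_mkdir() now probes the ops table for the optional posix_mkdir method before falling back to mkdir, the usual optional-operation dispatch for dialect-specific behavior. A trimmed sketch of the pattern; struct fs_ops and do_mkdir() are invented for illustration:

#include <errno.h>

/* trimmed-down ops table: optional methods are left NULL and the
 * caller must probe before dispatching, as cifs_mkdir() does */
struct fs_ops {
	int (*posix_mkdir)(const char *path, unsigned int mode);
	int (*mkdir)(const char *path, unsigned int mode);
};

static int do_mkdir(const struct fs_ops *ops, const char *path,
		    unsigned int mode)
{
	if (ops->posix_mkdir)		/* preferred when the dialect has it */
		return ops->posix_mkdir(path, mode);
	if (!ops->mkdir)		/* neither method: not supported */
		return -ENOSYS;
	return ops->mkdir(path, mode);
}
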
index af29ade..53e8362 100644
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
+               spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
 }
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
+       kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
 }
 
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
-               mutex_init(&ret_buf->prfid_mutex);
-               ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+               mutex_init(&ret_buf->crfid.fid_mutex);
+               ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+                                            GFP_KERNEL);
 #ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
-       kfree(buf_to_free->prfid);
+       kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
 }
 
index e2bec47..3ff7cec 100644
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
 #ifdef CONFIG_CIFS_SMB311
        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
-                cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+                cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+                (from[0] == '/')) {
                start_of_path = from + 1;
        }
 #endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
 {
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
-       int rc;
+       int rc = 0;
 
        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);
+
        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 
                open->oplock = lease_state;
        }
+
        return found;
 }
 
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);
+
+                               if (tcon->crfid.is_valid &&
+                                   !memcmp(rsp->LeaseKey,
+                                           tcon->crfid.fid->lease_key,
+                                           SMB2_LEASE_KEY_SIZE)) {
+                                       INIT_WORK(&tcon->crfid.lease_break,
+                                                 smb2_cached_lease_break);
+                                       queue_work(cifsiod_wq,
+                                                  &tcon->crfid.lease_break);
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       return true;
+                               }
                        }
                }
        }
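
The cached-root lease break above is handled by embedding a work_struct in the cached_fid, queueing it with queue_work(), and letting the handler recover its container via container_of(), which keeps the SMB2_close() out of the receive path. A compact user-space model of that embed-and-recover pattern, with the types pared down and the workqueue replaced by a direct call:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *);
};

/* simplified stand-in for the cifs cached_fid */
struct cached_fid {
	int is_valid;
	struct work_struct lease_break;
};

static void cached_lease_break(struct work_struct *work)
{
	/* recover the enclosing object from the embedded work item */
	struct cached_fid *cfid =
		container_of(work, struct cached_fid, lease_break);

	cfid->is_valid = 0;	/* the real handler also closes the fid */
}

int main(void)
{
	struct cached_fid cfid = { .is_valid = 1 };

	cfid.lease_break.func = cached_lease_break;
	cfid.lease_break.func(&cfid.lease_break);	/* "queue_work" */
	printf("is_valid=%d\n", cfid.is_valid);		/* prints 0 */
	return 0;
}
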
index b15f595..0356b55 100644
@@ -294,34 +294,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+                       size_t buf_len,
+                       struct cifs_server_iface **iface_list,
+                       size_t *iface_count)
+{
+       struct network_interface_info_ioctl_rsp *p;
+       struct sockaddr_in *addr4;
+       struct sockaddr_in6 *addr6;
+       struct iface_info_ipv4 *p4;
+       struct iface_info_ipv6 *p6;
+       struct cifs_server_iface *info;
+       ssize_t bytes_left;
+       size_t next = 0;
+       int nb_iface = 0;
+       int rc = 0;
+
+       *iface_list = NULL;
+       *iface_count = 0;
+
+       /*
+        * First pass: count and sanity check
+        */
+
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               nb_iface++;
+               next = le32_to_cpu(p->Next);
+               if (!next) {
+                       bytes_left -= sizeof(*p);
+                       break;
+               }
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (bytes_left || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+       /*
+        * Second pass: extract info to internal structure
+        */
+
+       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+       if (!*iface_list) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       info = *iface_list;
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               info->speed = le64_to_cpu(p->LinkSpeed);
+               info->rdma_capable = le32_to_cpu(p->Capability) & RDMA_CAPABLE ? 1 : 0;
+               info->rss_capable = le32_to_cpu(p->Capability) & RSS_CAPABLE ? 1 : 0;
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               switch (p->Family) {
+               /*
+                * The kernel and wire socket structures have the same
+                * layout and use network byte order but make the
+                * conversion explicit in case either one changes.
+                */
+               case INTERNETWORK:
+                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       p4 = (struct iface_info_ipv4 *)p->Buffer;
+                       addr4->sin_family = AF_INET;
+                       memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+                       /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+                       addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+                                &addr4->sin_addr);
+                       break;
+               case INTERNETWORKV6:
+                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       p6 = (struct iface_info_ipv6 *)p->Buffer;
+                       addr6->sin6_family = AF_INET6;
+                       memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+                       /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+                       addr6->sin6_flowinfo = 0;
+                       addr6->sin6_scope_id = 0;
+                       addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+                                &addr6->sin6_addr);
+                       break;
+               default:
+                       cifs_dbg(VFS,
+                                "%s: skipping unsupported socket family\n",
+                                __func__);
+                       goto next_iface;
+               }
+
+               (*iface_count)++;
+               info++;
+next_iface:
+               next = le32_to_cpu(p->Next);
+               if (!next)
+                       break;
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!*iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (rc) {
+               kfree(*iface_list);
+               *iface_count = 0;
+               *iface_list = NULL;
+       }
+       return rc;
+}
+
+
 static int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
-       struct network_interface_info_ioctl_rsp *out_buf;
+       struct network_interface_info_ioctl_rsp *out_buf = NULL;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        (char **)&out_buf, &ret_data_len);
-       if (rc != 0)
+       if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
-       else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
-               cifs_dbg(VFS, "server returned bad net interface info buf\n");
-               rc = -EINVAL;
-       } else {
-               /* Dump info on first interface */
-               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
-                       le32_to_cpu(out_buf->Capability));
-               cifs_dbg(FYI, "Link Speed %lld\n",
-                       le64_to_cpu(out_buf->LinkSpeed));
+               goto out;
        }
+
+       rc = parse_server_interfaces(out_buf, ret_data_len,
+                                    &iface_list, &iface_count);
+       if (rc)
+               goto out;
+
+       spin_lock(&ses->iface_lock);
+       kfree(ses->iface_list);
+       ses->iface_list = iface_list;
+       ses->iface_count = iface_count;
+       ses->iface_last_update = jiffies;
+       spin_unlock(&ses->iface_lock);
+
+out:
        kfree(out_buf);
        return rc;
 }
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+       struct cached_fid *cfid = container_of(work,
+                               struct cached_fid, lease_break);
+       mutex_lock(&cfid->fid_mutex);
+       if (cfid->is_valid) {
+               cifs_dbg(FYI, "clear cached root file handle\n");
+               SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+                          cfid->fid->volatile_fid);
+               cfid->is_valid = false;
+       }
+       mutex_unlock(&cfid->fid_mutex);
+}
 
 /*
  * Open the directory at the root of a share
@@ -331,13 +488,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        struct cifs_open_parms oparams;
        int rc;
        __le16 srch_path = 0; /* Null - since an open of top of share */
-       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       u8 oplock = SMB2_OPLOCK_LEVEL_II;
 
-       mutex_lock(&tcon->prfid_mutex);
-       if (tcon->valid_root_fid) {
+       mutex_lock(&tcon->crfid.fid_mutex);
+       if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
-               memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
-               mutex_unlock(&tcon->prfid_mutex);
+               memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+               mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }
 
@@ -350,10 +507,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc == 0) {
-               memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
-               tcon->valid_root_fid = true;
+               memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+               tcon->crfid.tcon = tcon;
+               tcon->crfid.is_valid = true;
        }
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
 
@@ -383,9 +541,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        if (rc)
                return;
 
-#ifdef CONFIG_CIFS_STATS2
        SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
 
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +592,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
 
-       if ((*full_path == 0) && tcon->valid_root_fid)
+       if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;
 
        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
                   struct smb_rqst *old_rq)
 {
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
 
        memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 }
 
 /* Assumes:
- * rqst->rq_iov[0]  is rfc1002 length
- * rqst->rq_iov[1]  is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0]  is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
  */
 static struct scatterlist *
 init_sg(struct smb_rqst *rqst, u8 *sign)
 {
-       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        struct scatterlist *sg;
        unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
                return NULL;
 
        sg_init_table(sg, sg_len);
-       smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
-       for (i = 1; i < rqst->rq_nvec - 1; i++)
-               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
-                                               rqst->rq_iov[i+1].iov_len);
+       smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+       for (i = 1; i < rqst->rq_nvec; i++)
+               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+                                               rqst->rq_iov[i].iov_len);
        for (j = 0; i < sg_len - 1; i++, j++) {
                unsigned int len, offset;
 
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        return 1;
 }
 /*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0]   - transform header (associate data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
  * untouched.
  */
 static int
 crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 {
        struct smb2_transform_hdr *tr_hdr =
-                       (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        int rc = 0;
        struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
        return rc;
 }
 
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
 static int
 smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                       struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        struct page **pages;
        struct smb2_transform_hdr *tr_hdr;
        unsigned int npages = old_rq->rq_npages;
-       unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+       unsigned int orig_len;
        int i;
        int rc = -ENOMEM;
 
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                        goto err_free_pages;
        }
 
-       /* Make space for one extra iov to hold the transform header */
        iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
                            GFP_KERNEL);
        if (!iov)
                goto err_free_pages;
 
-       /* copy all iovs from the old except the 1st one (rfc1002 length) */
-       memcpy(&iov[2], &old_rq->rq_iov[1],
-                               sizeof(struct kvec) * (old_rq->rq_nvec - 1));
-       /* copy the rfc1002 iov */
-       iov[0].iov_base = old_rq->rq_iov[0].iov_base;
-       iov[0].iov_len  = old_rq->rq_iov[0].iov_len;
+       /* copy all iovs from the old */
+       memcpy(&iov[1], &old_rq->rq_iov[0],
+                               sizeof(struct kvec) * old_rq->rq_nvec);
 
        new_rq->rq_iov = iov;
        new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        if (!tr_hdr)
                goto err_free_iov;
 
+       orig_len = smb2_rqst_len(old_rq, false);
+
        /* fill the 2nd iov with a transform header */
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
-       new_rq->rq_iov[1].iov_base = tr_hdr;
-       new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
-       /* Update rfc1002 header */
-       inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
-                       sizeof(struct smb2_transform_hdr));
+       new_rq->rq_iov[0].iov_base = tr_hdr;
+       new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
 
        /* copy pages form the old */
        for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
                put_page(rqst->rq_pages[i]);
        kfree(rqst->rq_pages);
        /* free transform header */
-       kfree(rqst->rq_iov[1].iov_base);
+       kfree(rqst->rq_iov[0].iov_base);
        kfree(rqst->rq_iov);
 }
 
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
                 unsigned int buf_data_size, struct page **pages,
                 unsigned int npages, unsigned int page_data_size)
 {
-       struct kvec iov[3];
+       struct kvec iov[2];
        struct smb_rqst rqst = {NULL};
        int rc;
 
-       iov[0].iov_base = NULL;
-       iov[0].iov_len = 0;
-       iov[1].iov_base = buf;
-       iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-       iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
-       iov[2].iov_len = buf_data_size;
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 3;
+       rqst.rq_nvec = 2;
        rqst.rq_pages = pages;
        rqst.rq_npages = npages;
        rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        if (rc)
                return rc;
 
-       memmove(buf, iov[2].iov_base, buf_data_size);
+       memmove(buf, iov[1].iov_base, buf_data_size);
 
        server->total_read = buf_data_size + page_data_size;
 
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
        .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
+       .posix_mkdir = smb311_posix_mkdir,
        .rmdir = smb2_rmdir,
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
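
parse_server_interfaces() above walks the FSCTL_QUERY_NETWORK_INTERFACE_INFO buffer twice: records are chained by a Next byte offset (zero terminates), so the first pass counts and bounds-checks while the second fills a single allocation. A user-space sketch of the count-then-fill walk; struct rec and its fixed layout are invented for the example:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* invented fixed-size record; the SMB2 response uses variable ones */
struct rec {
	uint32_t next;	/* byte offset to the next record, 0 = last */
	uint32_t value;
};

/* Walk a next-offset chain, stopping at next == 0 or when the declared
 * offset would run past the buffer, as the cifs parser guards with
 * bytes_left.  Returns the record count. */
static size_t count_records(const uint8_t *buf, size_t len)
{
	size_t off = 0, n = 0;

	while (len - off >= sizeof(struct rec)) {
		struct rec r;

		memcpy(&r, buf + off, sizeof(r));	/* alignment-safe */
		n++;
		if (r.next == 0 || r.next > len - off)
			break;
		off += r.next;
	}
	return n;
}

/* Two-pass extraction: size the array first, then fill it. */
static uint32_t *extract_values(const uint8_t *buf, size_t len, size_t *n)
{
	uint32_t *out;
	size_t off = 0, i = 0;

	*n = count_records(buf, len);
	if (*n == 0)
		return NULL;
	out = calloc(*n, sizeof(*out));
	if (!out)
		return NULL;
	while (i < *n) {
		struct rec r;

		memcpy(&r, buf + off, sizeof(r));
		out[i++] = r.value;
		if (r.next == 0)
			break;
		off += r.next;
	}
	return out;
}
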
index af032e1..810b857 100644
@@ -602,6 +602,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
 int
 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
@@ -673,7 +674,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
        /*
@@ -990,8 +995,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        req->PreviousSessionId = sess_data->previous_session;
 
        req->Flags = 0; /* MBZ */
-       /* to enable echos and oplocks */
-       req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+       /* enough to enable echoes and oplocks and one max size write */
+       req->sync_hdr.CreditRequest = cpu_to_le16(130);
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (server->sign)
@@ -1027,6 +1033,7 @@ static int
 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
 {
        int rc;
+       struct smb_rqst rqst;
        struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
        struct kvec rsp_iov = { NULL, 0 };
 
@@ -1035,10 +1042,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
                cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
        req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-       /* BB add code to build os and lm fields */
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = sess_data->iov;
+       rqst.rq_nvec = 2;
 
-       rc = smb2_send_recv(sess_data->xid, sess_data->ses,
-                           sess_data->iov, 2,
+       /* BB add code to build os and lm fields */
+       rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+                           &rqst,
                            &sess_data->buf0_type,
                            CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
        cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1386,7 @@ out:
 int
 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;
@@ -1413,7 +1424,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        /*
         * No tcon so can't do
@@ -1443,6 +1458,7 @@ int
 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -1499,7 +1515,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
            !smb3_encryption_required(tcon))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
 
@@ -1563,6 +1583,7 @@ tcon_error_exit:
 int
 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1614,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1886,11 +1911,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
        return 0;
 }
 
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb)
+{
+       struct smb_rqst rqst;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses = tcon->ses;
+       struct kvec iov[3]; /* request, path, and one create context */
+       struct kvec rsp_iov = {NULL, 0};
+       int resp_buftype;
+       int uni_path_len;
+       __le16 *copy_path = NULL;
+       int copy_size;
+       int rc = 0;
+       unsigned int n_iov = 2;
+       __u32 file_attributes = 0;
+       char *pc_buf = NULL;
+       int flags = 0;
+       unsigned int total_len;
+       __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+       if (!path)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "mkdir\n");
+
+       if (ses && (ses->server))
+               server = ses->server;
+       else {
+               kfree(path);
+               return -EIO;
+       }
+
+       rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+       if (rc) {
+               kfree(path);
+               return rc;
+       }
+
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+
+       req->ImpersonationLevel = IL_IMPERSONATION;
+       req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+       /* File attributes ignored on open (used in create though) */
+       req->FileAttributes = cpu_to_le32(file_attributes);
+       req->ShareAccess = FILE_SHARE_ALL_LE;
+       req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+       req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+       iov[0].iov_base = (char *)req;
+       /* -1 since last byte is buf[0] which is sent below (path) */
+       iov[0].iov_len = total_len - 1;
+
+       req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+       /* [MS-SMB2] 2.2.13 NameOffset:
+        * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+        * the SMB2 header, the file name includes a prefix that will
+        * be processed during DFS name normalization as specified in
+        * section 3.3.5.9. Otherwise, the file name is relative to
+        * the share that is identified by the TreeId in the SMB2
+        * header.
+        */
+       if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+               int name_len;
+
+               req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+               rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+                                                &name_len,
+                                                tcon->treeName, path);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       return rc;
+               }
+               req->NameLength = cpu_to_le16(name_len * 2);
+               uni_path_len = copy_size;
+               path = copy_path;
+       } else {
+               uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+               /* MUST set path len (NameLength) to 0 opening root of share */
+               req->NameLength = cpu_to_le16(uni_path_len - 2);
+               if (uni_path_len % 8 != 0) {
+                       copy_size = roundup(uni_path_len, 8);
+                       copy_path = kzalloc(copy_size, GFP_KERNEL);
+                       if (!copy_path) {
+                               cifs_small_buf_release(req);
+                               return -ENOMEM;
+                       }
+                       memcpy((char *)copy_path, (const char *)path,
+                              uni_path_len);
+                       uni_path_len = copy_size;
+                       path = copy_path;
+               }
+       }
+
+       iov[1].iov_len = uni_path_len;
+       iov[1].iov_base = path;
+       req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (tcon->posix_extensions) {
+               if (n_iov > 2) {
+                       struct create_context *ccontext =
+                           (struct create_context *)iov[n_iov-1].iov_base;
+                       ccontext->Next =
+                               cpu_to_le32(iov[n_iov-1].iov_len);
+               }
+
+               rc = add_posix_context(iov, &n_iov, mode);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+                       return rc;
+               }
+               pc_buf = iov[n_iov-1].iov_base;
+       }
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+                           &rsp_iov);
+
+       cifs_small_buf_release(req);
+       rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+       if (rc != 0) {
+               cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+               goto smb311_mkdir_exit;
+       }
+
+       trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+                                   ses->Suid, CREATE_NOT_FILE,
+                                   FILE_WRITE_ATTRIBUTES);
+
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+       /* Eventually save off posix-specific response info and timestamps */
+
+smb311_mkdir_exit:
+       kfree(copy_path);
+       kfree(pc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+}
+#endif /* SMB311 */
+
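
Note how the create contexts above are chained: each context's Next field holds the byte offset from that context to the following one, and the final context leaves Next as 0. A sketch of appending one more context under that convention (new_ctx and new_ctx_len are illustrative, not names from this patch):

	/* point the previous context at the one being appended */
	struct create_context *prev =
		(struct create_context *)iov[n_iov - 1].iov_base;

	prev->Next = cpu_to_le32(iov[n_iov - 1].iov_len);

	iov[n_iov].iov_base = new_ctx;
	iov[n_iov].iov_len = new_ctx_len;
	n_iov++;
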
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
 {
+       struct smb_rqst rqst;
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
@@ -2043,7 +2222,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 #endif /* SMB311 */
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2282,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           char *in_data, u32 indatalen,
           char **out_data, u32 *plen /* returned data len */)
 {
+       struct smb_rqst rqst;
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct cifs_ses *ses;
@@ -2189,7 +2373,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2462,7 @@ int
 SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, int flags)
 {
+       struct smb_rqst rqst;
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2490,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
 
@@ -2387,6 +2580,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u32 additional_info, size_t output_len, size_t min_len, void **data,
                u32 *dlen)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -2427,7 +2621,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
 
@@ -2594,11 +2792,10 @@ SMB2_echo(struct TCP_Server_Info *server)
 {
        struct smb2_echo_req *req;
        int rc = 0;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { .rq_iov = iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        cifs_dbg(FYI, "In echo request\n");
 
@@ -2614,11 +2811,8 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        req->sync_hdr.CreditRequest = cpu_to_le16(1);
 
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len;
+       iov[0].iov_base = (char *)req;
 
        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
                             server, CIFS_ECHO_OP);
@@ -2633,6 +2827,7 @@ int
 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
 {
+       struct smb_rqst rqst;
        struct smb2_flush_req *req;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
@@ -2660,7 +2855,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc != 0) {
@@ -2848,10 +3047,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
        struct smb2_sync_hdr *shdr;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        struct TCP_Server_Info *server;
        unsigned int total_len;
-       __be32 req_len;
 
        cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
                 __func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3080,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (smb3_encryption_required(io_parms.tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       req_len = cpu_to_be32(total_len);
-
-       rdata->iov[0].iov_base = &req_len;
-       rdata->iov[0].iov_len = sizeof(__be32);
-       rdata->iov[1].iov_base = buf;
-       rdata->iov[1].iov_len = total_len;
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = total_len;
 
        shdr = (struct smb2_sync_hdr *)buf;
 
@@ -2926,6 +3120,7 @@ int
 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
 {
+       struct smb_rqst rqst;
        int resp_buftype, rc = -EACCES;
        struct smb2_read_plain_req *req = NULL;
        struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3141,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3261,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct smb2_sync_hdr *shdr;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
        if (rc) {
@@ -3137,15 +3335,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
                v1->length = cpu_to_le32(wdata->mr->mr->length);
        }
 #endif
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len - 1;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len - 1;
+       iov[0].iov_base = (char *)req;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 2;
+       rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_offset = wdata->page_offset;
        rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3347,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_tailsz = wdata->tailsz;
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr) {
-               iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+               iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
                rqst.rq_npages = 0;
        }
 #endif
@@ -3210,6 +3404,7 @@ int
 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3446,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_vec + 1;
+
+       rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3522,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
 {
+       struct smb_rqst rqst;
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -3395,7 +3595,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
 
@@ -3454,6 +3658,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
               u8 info_type, u32 additional_info, unsigned int num,
                void **data, unsigned int *size)
 {
+       struct smb_rqst rqst;
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
@@ -3509,7 +3714,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                iov[i].iov_len = size[i];
        }
 
-       rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = num;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -3664,6 +3873,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3902,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
@@ -3755,6 +3969,7 @@ int
 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3773,7 +3988,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4017,7 @@ int
 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, int level)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3829,7 +4049,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4092,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
@@ -3900,7 +4125,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_len = count;
 
        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
-       rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -3934,6 +4164,7 @@ int
 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_lease_ack *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4195,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
index a345560..824ddde 100644
@@ -851,8 +851,11 @@ struct validate_negotiate_info_rsp {
        __le16 Dialect; /* Dialect in use for the connection */
 } __packed;
 
-#define RSS_CAPABLE    0x00000001
-#define RDMA_CAPABLE   0x00000002
+#define RSS_CAPABLE    cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE   cpu_to_le32(0x00000002)
+
+#define INTERNETWORK   cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
 
 struct network_interface_info_ioctl_rsp {
        __le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +863,21 @@ struct network_interface_info_ioctl_rsp {
        __le32 Capability; /* RSS or RDMA Capable */
        __le32 Reserved;
        __le64 LinkSpeed;
-       char    SockAddr_Storage[128];
+       __le16 Family;
+       __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+       __be16 Port;
+       __be32 IPv4Address;
+       __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8   IPv6Address[16];
+       __be32 ScopeId;
 } __packed;
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
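
Splitting Family out of the old opaque SockAddr_Storage lets consumers switch on the address family directly, and storing the constants little-endian means the comparison needs no per-use byteswap. A hedged sketch of walking an FSCTL_QUERY_NETWORK_INTERFACE_INFO response with the new layout (bounds checking elided; not code from this patch):

	const struct network_interface_info_ioctl_rsp *p = (const void *)buf;
	u32 next;

	do {
		if (p->Family == INTERNETWORK) {
			const struct iface_info_ipv4 *v4 =
				(const struct iface_info_ipv4 *)p->Buffer;
			/* v4->Port and v4->IPv4Address are big-endian */
		} else if (p->Family == INTERNETWORKV6) {
			const struct iface_info_ipv6 *v6 =
				(const struct iface_info_ipv6 *)p->Buffer;
			/* v6->IPv6Address holds 16 raw bytes */
		}
		next = le32_to_cpu(p->Next);
		p = (const void *)((const char *)p + next);
	} while (next);
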
index c840200..3ae208a 100644
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb, bool set_alloc);
 extern int smb2_set_file_info(struct inode *inode, const char *full_path,
                              FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb);
 extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *name, struct cifs_sb_info *cifs_sb);
 extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
 extern void smb2_reconnect_server(struct work_struct *work);
 extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index 349d5cc..51b9437 100644
@@ -171,9 +171,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
        unsigned char *sigptr = smb2_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -204,7 +202,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index,  server, sigptr,
+       rc = __cifs_calc_signature(rqst, server, sigptr,
                &server->secmech.sdeschmacsha256->shash);
 
        if (!rc)
@@ -414,9 +412,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb3_signature[SMB2_CMACAES_SIZE];
        unsigned char *sigptr = smb3_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -447,7 +443,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
+       rc = __cifs_calc_signature(rqst, server, sigptr,
                                   &server->secmech.sdesccmacaes->shash);
 
        if (!rc)
@@ -462,7 +458,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
 
        if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
            server->tcpStatus == CifsNeedNegotiate)
@@ -635,7 +631,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +652,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(server, shdr);
index e459c97..6fd94d9 100644
@@ -18,6 +18,7 @@
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "smb2proto.h"
 
 static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
@@ -2087,7 +2088,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
        struct kvec vec;
        int nvecs;
        int size;
-       unsigned int buflen = 0, remaining_data_length;
+       unsigned int buflen, remaining_data_length;
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2112,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
                return -EINVAL;
        }
-       iov = &rqst->rq_iov[1];
-
-       /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec-1; i++) {
-               buflen += iov[i].iov_len;
-       }
 
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       if (rqst->rq_npages) {
-               if (rqst->rq_npages == 1)
-                       buflen += rqst->rq_tailsz;
-               else
-                       buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
-                                       rqst->rq_offset + rqst->rq_tailsz;
-       }
+       buflen = smb2_rqst_len(rqst, true);
 
        if (buflen + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
@@ -2139,6 +2128,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                goto done;
        }
 
+       iov = &rqst->rq_iov[1];
+
        cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
        for (i = 0; i < rqst->rq_nvec-1; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
index 61e74d4..67e413f 100644
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
        TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
 
 DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
 
 DECLARE_EVENT_CLASS(smb3_open_done_class,
        TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
        TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
 
 #endif /* _CIFS_TRACE_H */
 
index 1f1a68f..fb57dfb 100644
@@ -201,15 +201,24 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
        return 0;
 }
 
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker)
 {
        unsigned int i;
-       struct kvec *iov = rqst->rq_iov;
+       struct kvec *iov;
+       int nvec;
        unsigned long buflen = 0;
 
+       if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) {
+               iov = &rqst->rq_iov[1];
+               nvec = rqst->rq_nvec - 1;
+       } else {
+               iov = rqst->rq_iov;
+               nvec = rqst->rq_nvec;
+       }
+
        /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec; i++)
+       for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;
 
        /*
@@ -236,18 +245,20 @@ rqst_len(struct smb_rqst *rqst)
 }
 
 static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+               struct smb_rqst *rqst)
 {
-       int rc;
-       struct kvec *iov = rqst->rq_iov;
-       int n_vec = rqst->rq_nvec;
-       unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
-       unsigned long send_length;
-       unsigned int i;
+       int rc = 0;
+       struct kvec *iov;
+       int n_vec;
+       unsigned int send_length = 0;
+       unsigned int i, j;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
+       __be32 rfc1002_marker;
+
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server->smbd_conn, rqst);
                goto smbd_done;
@@ -255,51 +266,67 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
        if (ssocket == NULL)
                return -ENOTSOCK;
 
-       /* sanity check send length */
-       send_length = rqst_len(rqst);
-       if (send_length != smb_buf_length + 4) {
-               WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
-                       send_length, smb_buf_length);
-               return -EIO;
-       }
-
-       if (n_vec < 2)
-               return -EIO;
-
-       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
-       dump_smb(iov[0].iov_base, iov[0].iov_len);
-       dump_smb(iov[1].iov_base, iov[1].iov_len);
-
        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       size = 0;
-       for (i = 0; i < n_vec; i++)
-               size += iov[i].iov_len;
+       for (j = 0; j < num_rqst; j++)
+               send_length += smb2_rqst_len(&rqst[j], true);
+       rfc1002_marker = cpu_to_be32(send_length);
 
-       iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+       /* Generate an RFC1002 marker for SMB2+ */
+       if (server->vals->header_preamble_size == 0) {
+               struct kvec hiov = {
+                       .iov_base = &rfc1002_marker,
+                       .iov_len  = 4
+               };
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+                             1, 4);
+               rc = smb_send_kvec(server, &smb_msg, &sent);
+               if (rc < 0)
+                       goto uncork;
 
-       rc = smb_send_kvec(server, &smb_msg, &sent);
-       if (rc < 0)
-               goto uncork;
+               total_len += sent;
+               send_length += 4;
+       }
 
-       total_len += sent;
+       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 
-       /* now walk the page array and send each page in it */
-       for (i = 0; i < rqst->rq_npages; i++) {
-               struct bio_vec bvec;
+       for (j = 0; j < num_rqst; j++) {
+               iov = rqst[j].rq_iov;
+               n_vec = rqst[j].rq_nvec;
 
-               bvec.bv_page = rqst->rq_pages[i];
-               rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+               size = 0;
+               for (i = 0; i < n_vec; i++) {
+                       dump_smb(iov[i].iov_base, iov[i].iov_len);
+                       size += iov[i].iov_len;
+               }
+
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+                             iov, n_vec, size);
 
-               iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
-                             &bvec, 1, bvec.bv_len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
-                       break;
+                       goto uncork;
 
                total_len += sent;
+
+               /* now walk the page array and send each page in it */
+               for (i = 0; i < rqst[j].rq_npages; i++) {
+                       struct bio_vec bvec;
+
+                       bvec.bv_page = rqst[j].rq_pages[i];
+                       rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+                                            &bvec.bv_offset);
+
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                                     &bvec, 1, bvec.bv_len);
+                       rc = smb_send_kvec(server, &smb_msg, &sent);
+                       if (rc < 0)
+                               break;
+
+                       total_len += sent;
+               }
        }
 
 uncork:
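
The restructured sender frames the stream as one RFC1002 marker that covers every request passed in, followed by each request's iovs and then its pages. A standalone userspace sketch of that framing (illustrative only, not kernel code):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/uio.h>

	/* total up one request's iov array, as smb2_rqst_len() does */
	static unsigned long rqst_len(const struct iovec *iov, int nvec)
	{
		unsigned long len = 0;

		for (int i = 0; i < nvec; i++)
			len += iov[i].iov_len;
		return len;
	}

	int main(void)
	{
		char hdr1[64], hdr2[64], payload[4096];
		struct iovec rq1[] = { { hdr1, sizeof(hdr1) } };
		struct iovec rq2[] = { { hdr2, sizeof(hdr2) },
				       { payload, sizeof(payload) } };
		unsigned long send_length = rqst_len(rq1, 1) + rqst_len(rq2, 2);
		uint32_t marker = htonl(send_length); /* cpu_to_be32() in kernel */

		/* wire order: marker first, then both PDUs back to back */
		printf("marker=0x%08x covers %lu bytes\n",
		       (unsigned int)ntohl(marker), send_length);
		return 0;
	}

Threading num_rqst through here looks like preparation for compounded requests; every caller in this patch still passes 1.
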
@@ -308,9 +335,9 @@ uncork:
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+       if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
-                        smb_buf_length + 4, total_len);
+                        send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
@@ -335,7 +362,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        int rc;
 
        if (!(flags & CIFS_TRANSFORM_REQ))
-               return __smb_send_rqst(server, rqst);
+               return __smb_send_rqst(server, 1, rqst);
 
        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
@@ -347,7 +374,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        if (rc)
                return rc;
 
-       rc = __smb_send_rqst(server, &cur_rqst);
+       rc = __smb_send_rqst(server, 1, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
 }
@@ -365,7 +392,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;
 
-       return __smb_send_rqst(server, &rqst);
+       return __smb_send_rqst(server, 1, &rqst);
 }
 
 static int
@@ -730,7 +757,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
-
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;
@@ -766,8 +792,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
-               smb311_update_preauth_hash(ses, rqst->rq_iov+1,
-                                          rqst->rq_nvec-1);
+               smb311_update_preauth_hash(ses, rqst->rq_iov,
+                                          rqst->rq_nvec);
 #endif
 
        if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +838,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
-                       .iov_base = buf,
-                       .iov_len = midQ->resp_buf_size
+                       .iov_base = resp_iov->iov_base,
+                       .iov_len = resp_iov->iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }
@@ -872,49 +898,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
-              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
-              const int flags, struct kvec *resp_iov)
-{
-       struct smb_rqst rqst;
-       struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
-       int rc;
-       int i;
-       __u32 count;
-       __be32 rfc1002_marker;
-
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
-               new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
-                                       GFP_KERNEL);
-               if (!new_iov)
-                       return -ENOMEM;
-       } else
-               new_iov = s_iov;
-
-       /* 1st iov is an RFC1002 Session Message length */
-       memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
-       count = 0;
-       for (i = 1; i < n_vec + 1; i++)
-               count += new_iov[i].iov_len;
-
-       rfc1002_marker = cpu_to_be32(count);
-
-       new_iov[0].iov_base = &rfc1002_marker;
-       new_iov[0].iov_len = 4;
-
-       memset(&rqst, 0, sizeof(struct smb_rqst));
-       rqst.rq_iov = new_iov;
-       rqst.rq_nvec = n_vec + 1;
-
-       rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
-               kfree(new_iov);
-       return rc;
-}
-
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
index c60f3d3..a679798 100644
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple, allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
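
The max_size computation is a standard power-of-two round-up, now hoisted above the allocation so the buffer is sized in whole filesystem blocks. For example, with s_blocksize = 4096 and size = 5000:

	(5000 + 4095) & ~4095 = 9095 & ~4095 = 8192

i.e. two full 4 KiB blocks, so ea_get() can grow into the whole allocated extent rather than stopping at the original size.
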
index 63a1ca4..e2bea2a 100644
@@ -79,12 +79,11 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
  */
 static int dnotify_handle_event(struct fsnotify_group *group,
                                struct inode *inode,
-                               struct fsnotify_mark *inode_mark,
-                               struct fsnotify_mark *vfsmount_mark,
                                u32 mask, const void *data, int data_type,
                                const unsigned char *file_name, u32 cookie,
                                struct fsnotify_iter_info *iter_info)
 {
+       struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
        struct dnotify_mark *dn_mark;
        struct dnotify_struct *dn;
        struct dnotify_struct **prev;
@@ -95,7 +94,8 @@ static int dnotify_handle_event(struct fsnotify_group *group,
        if (!S_ISDIR(inode->i_mode))
                return 0;
 
-       BUG_ON(vfsmount_mark);
+       if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
+               return 0;
 
        dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);
 
@@ -319,7 +319,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
                dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
                spin_lock(&fsn_mark->lock);
        } else {
-               error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
+               error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0);
                if (error) {
                        mutex_unlock(&dnotify_group->mark_mutex);
                        goto out_err;
index d94e803..f90842e 100644
@@ -87,17 +87,17 @@ static int fanotify_get_response(struct fsnotify_group *group,
        return ret;
 }
 
-static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
-                                      struct fsnotify_mark *vfsmnt_mark,
-                                      u32 event_mask,
-                                      const void *data, int data_type)
+static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
+                                      u32 event_mask, const void *data,
+                                      int data_type)
 {
        __u32 marks_mask = 0, marks_ignored_mask = 0;
        const struct path *path = data;
+       struct fsnotify_mark *mark;
+       int type;
 
-       pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
-                " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
-                event_mask, data, data_type);
+       pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
+                __func__, iter_info->report_mask, event_mask, data, data_type);
 
        /* if we don't have enough info to send an event to userspace say no */
        if (data_type != FSNOTIFY_EVENT_PATH)
@@ -108,20 +108,21 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
            !d_can_lookup(path->dentry))
                return false;
 
-       /*
-        * if the event is for a child and this inode doesn't care about
-        * events on the child, don't send it!
-        */
-       if (inode_mark &&
-           (!(event_mask & FS_EVENT_ON_CHILD) ||
-            (inode_mark->mask & FS_EVENT_ON_CHILD))) {
-               marks_mask |= inode_mark->mask;
-               marks_ignored_mask |= inode_mark->ignored_mask;
-       }
+       fsnotify_foreach_obj_type(type) {
+               if (!fsnotify_iter_should_report_type(iter_info, type))
+                       continue;
+               mark = iter_info->marks[type];
+               /*
+                * if the event is for a child and this inode doesn't care about
+                * events on the child, don't send it!
+                */
+               if (type == FSNOTIFY_OBJ_TYPE_INODE &&
+                   (event_mask & FS_EVENT_ON_CHILD) &&
+                   !(mark->mask & FS_EVENT_ON_CHILD))
+                       continue;
 
-       if (vfsmnt_mark) {
-               marks_mask |= vfsmnt_mark->mask;
-               marks_ignored_mask |= vfsmnt_mark->ignored_mask;
+               marks_mask |= mark->mask;
+               marks_ignored_mask |= mark->ignored_mask;
        }
 
        if (d_is_dir(path->dentry) &&
@@ -178,8 +179,6 @@ init: __maybe_unused
 
 static int fanotify_handle_event(struct fsnotify_group *group,
                                 struct inode *inode,
-                                struct fsnotify_mark *inode_mark,
-                                struct fsnotify_mark *fanotify_mark,
                                 u32 mask, const void *data, int data_type,
                                 const unsigned char *file_name, u32 cookie,
                                 struct fsnotify_iter_info *iter_info)
@@ -199,8 +198,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
        BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
        BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
 
-       if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
-                                       data_type))
+       if (!fanotify_should_send_event(iter_info, mask, data, data_type))
                return 0;
 
        pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
index d478629..10aac19 100644
@@ -77,7 +77,7 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
        struct inotify_inode_mark *inode_mark;
        struct inode *inode;
 
-       if (!(mark->connector->flags & FSNOTIFY_OBJ_TYPE_INODE))
+       if (mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE)
                return;
 
        inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
@@ -116,7 +116,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
        if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
                mflags |= FAN_MARK_IGNORED_SURV_MODIFY;
 
-       if (mark->connector->flags & FSNOTIFY_OBJ_TYPE_INODE) {
+       if (mark->connector->type == FSNOTIFY_OBJ_TYPE_INODE) {
                inode = igrab(mark->connector->inode);
                if (!inode)
                        return;
@@ -126,7 +126,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
                show_mark_fhandle(m, inode);
                seq_putc(m, '\n');
                iput(inode);
-       } else if (mark->connector->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
+       } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
                struct mount *mnt = real_mount(mark->connector->mnt);
 
                seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
index 613ec7e..f174397 100644
@@ -184,8 +184,6 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
 EXPORT_SYMBOL_GPL(__fsnotify_parent);
 
 static int send_to_group(struct inode *to_tell,
-                        struct fsnotify_mark *inode_mark,
-                        struct fsnotify_mark *vfsmount_mark,
                         __u32 mask, const void *data,
                         int data_is, u32 cookie,
                         const unsigned char *file_name,
@@ -195,48 +193,45 @@ static int send_to_group(struct inode *to_tell,
        __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
        __u32 marks_mask = 0;
        __u32 marks_ignored_mask = 0;
+       struct fsnotify_mark *mark;
+       int type;
 
-       if (unlikely(!inode_mark && !vfsmount_mark)) {
-               BUG();
+       if (WARN_ON(!iter_info->report_mask))
                return 0;
-       }
 
        /* clear ignored on inode modification */
        if (mask & FS_MODIFY) {
-               if (inode_mark &&
-                   !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
-                       inode_mark->ignored_mask = 0;
-               if (vfsmount_mark &&
-                   !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
-                       vfsmount_mark->ignored_mask = 0;
-       }
-
-       /* does the inode mark tell us to do something? */
-       if (inode_mark) {
-               group = inode_mark->group;
-               marks_mask |= inode_mark->mask;
-               marks_ignored_mask |= inode_mark->ignored_mask;
+               fsnotify_foreach_obj_type(type) {
+                       if (!fsnotify_iter_should_report_type(iter_info, type))
+                               continue;
+                       mark = iter_info->marks[type];
+                       if (mark &&
+                           !(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+                               mark->ignored_mask = 0;
+               }
        }
 
-       /* does the vfsmount_mark tell us to do something? */
-       if (vfsmount_mark) {
-               group = vfsmount_mark->group;
-               marks_mask |= vfsmount_mark->mask;
-               marks_ignored_mask |= vfsmount_mark->ignored_mask;
+       fsnotify_foreach_obj_type(type) {
+               if (!fsnotify_iter_should_report_type(iter_info, type))
+                       continue;
+               mark = iter_info->marks[type];
+               /* does the object mark tell us to do something? */
+               if (mark) {
+                       group = mark->group;
+                       marks_mask |= mark->mask;
+                       marks_ignored_mask |= mark->ignored_mask;
+               }
        }
 
-       pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
-                " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
+       pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x"
                 " data=%p data_is=%d cookie=%d\n",
-                __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
-                marks_mask, marks_ignored_mask, data,
-                data_is, cookie);
+                __func__, group, to_tell, mask, marks_mask, marks_ignored_mask,
+                data, data_is, cookie);
 
        if (!(test_mask & marks_mask & ~marks_ignored_mask))
                return 0;
 
-       return group->ops->handle_event(group, to_tell, inode_mark,
-                                       vfsmount_mark, mask, data, data_is,
+       return group->ops->handle_event(group, to_tell, mask, data, data_is,
                                        file_name, cookie, iter_info);
 }
 
@@ -263,6 +258,57 @@ static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
        return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
 }
 
+/*
+ * iter_info is a multi-head priority queue of marks.
+ * Pick a subset of marks from the queue heads, all belonging to the
+ * same group, and set the report_mask for the selected subset.
+ * Returns the report_mask of the selected subset.
+ */
+static unsigned int fsnotify_iter_select_report_types(
+               struct fsnotify_iter_info *iter_info)
+{
+       struct fsnotify_group *max_prio_group = NULL;
+       struct fsnotify_mark *mark;
+       int type;
+
+       /* Choose max prio group among groups of all queue heads */
+       fsnotify_foreach_obj_type(type) {
+               mark = iter_info->marks[type];
+               if (mark &&
+                   fsnotify_compare_groups(max_prio_group, mark->group) > 0)
+                       max_prio_group = mark->group;
+       }
+
+       if (!max_prio_group)
+               return 0;
+
+       /* Set the report mask for marks from same group as max prio group */
+       iter_info->report_mask = 0;
+       fsnotify_foreach_obj_type(type) {
+               mark = iter_info->marks[type];
+               if (mark &&
+                   fsnotify_compare_groups(max_prio_group, mark->group) == 0)
+                       fsnotify_iter_set_report_type(iter_info, type);
+       }
+
+       return iter_info->report_mask;
+}
+
+/*
+ * Pop from the iter_info multi-head queue the marks that were iterated in
+ * the current iteration step.
+ */
+static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
+{
+       int type;
+
+       fsnotify_foreach_obj_type(type) {
+               if (fsnotify_iter_should_report_type(iter_info, type))
+                       iter_info->marks[type] =
+                               fsnotify_next_mark(iter_info->marks[type]);
+       }
+}
+
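
Both helpers lean on a small bitmask API over the per-type marks array. A sketch of the assumed shape of those primitives (the real definitions belong in fsnotify_backend.h and may differ in detail):

	#define fsnotify_foreach_obj_type(type) \
		for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)

	static inline bool fsnotify_iter_should_report_type(
			struct fsnotify_iter_info *iter_info, int type)
	{
		return (iter_info->report_mask & (1U << type));
	}

	static inline void fsnotify_iter_set_report_type(
			struct fsnotify_iter_info *iter_info, int type)
	{
		iter_info->report_mask |= (1U << type);
	}
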
 /*
  * This is the main call to fsnotify.  The VFS calls into hook specific functions
  * in linux/fsnotify.h.  Those functions then in turn call here.  Here will call
@@ -307,15 +353,15 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
 
        if ((mask & FS_MODIFY) ||
            (test_mask & to_tell->i_fsnotify_mask)) {
-               iter_info.inode_mark =
+               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
                        fsnotify_first_mark(&to_tell->i_fsnotify_marks);
        }
 
        if (mnt && ((mask & FS_MODIFY) ||
                    (test_mask & mnt->mnt_fsnotify_mask))) {
-               iter_info.inode_mark =
+               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
                        fsnotify_first_mark(&to_tell->i_fsnotify_marks);
-               iter_info.vfsmount_mark =
+               iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
                        fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
        }
 
@@ -324,32 +370,14 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
         * ignore masks are properly reflected for mount mark notifications.
         * That's why this traversal is so complicated...
         */
-       while (iter_info.inode_mark || iter_info.vfsmount_mark) {
-               struct fsnotify_mark *inode_mark = iter_info.inode_mark;
-               struct fsnotify_mark *vfsmount_mark = iter_info.vfsmount_mark;
-
-               if (inode_mark && vfsmount_mark) {
-                       int cmp = fsnotify_compare_groups(inode_mark->group,
-                                                         vfsmount_mark->group);
-                       if (cmp > 0)
-                               inode_mark = NULL;
-                       else if (cmp < 0)
-                               vfsmount_mark = NULL;
-               }
-
-               ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
-                                   data, data_is, cookie, file_name,
-                                   &iter_info);
+       while (fsnotify_iter_select_report_types(&iter_info)) {
+               ret = send_to_group(to_tell, mask, data, data_is, cookie,
+                                   file_name, &iter_info);
 
                if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
                        goto out;
 
-               if (inode_mark)
-                       iter_info.inode_mark =
-                               fsnotify_next_mark(iter_info.inode_mark);
-               if (vfsmount_mark)
-                       iter_info.vfsmount_mark =
-                               fsnotify_next_mark(iter_info.vfsmount_mark);
+               fsnotify_iter_next(&iter_info);
        }
        ret = 0;
 out:
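
The two helpers above replace the old pairwise inode/vfsmount comparison with a
generic two-pass scan: first find the highest-priority group among the marks at
the head of each per-type list, then report every head mark that belongs to
that group. A minimal standalone sketch of the selection step, with simplified
types and a plain integer priority standing in for fsnotify_compare_groups()
(both hypothetical):

    #define NTYPES 2                        /* inode + vfsmount, as above */

    struct mark { int prio; };              /* stand-in for fsnotify_mark */

    /* Bitmask of types whose head mark belongs to the max-prio group. */
    static unsigned int select_report_types(struct mark *heads[NTYPES])
    {
            int type, max_prio = -1;
            unsigned int mask = 0;

            for (type = 0; type < NTYPES; type++)   /* pass 1: max priority */
                    if (heads[type] && heads[type]->prio > max_prio)
                            max_prio = heads[type]->prio;

            if (max_prio < 0)                       /* all lists exhausted */
                    return 0;

            for (type = 0; type < NTYPES; type++)   /* pass 2: report mask */
                    if (heads[type] && heads[type]->prio == max_prio)
                            mask |= 1U << type;
            return mask;
    }

Because the comparison is keyed on the group rather than the type, marks of
different types that belong to the same group are reported in the same
iteration step, which is what keeps ignore masks properly reflected for mount
mark notifications.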
index 60f365d..34515d2 100644 (file)
@@ -9,12 +9,6 @@
 
 #include "../mount.h"
 
-struct fsnotify_iter_info {
-       struct fsnotify_mark *inode_mark;
-       struct fsnotify_mark *vfsmount_mark;
-       int srcu_idx;
-};
-
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
index b7a4b6a..aa5468f 100644 (file)
@@ -67,7 +67,7 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
        fsnotify_group_stop_queueing(group);
 
        /* Clear all marks for this group and queue them for destruction */
-       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES);
+       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);
 
        /*
         * Some marks can still be pinned when waiting for response from
index c00d2ca..7e4578d 100644 (file)
@@ -25,8 +25,6 @@ extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
                                           struct fsnotify_group *group);
 extern int inotify_handle_event(struct fsnotify_group *group,
                                struct inode *inode,
-                               struct fsnotify_mark *inode_mark,
-                               struct fsnotify_mark *vfsmount_mark,
                                u32 mask, const void *data, int data_type,
                                const unsigned char *file_name, u32 cookie,
                                struct fsnotify_iter_info *iter_info);
index 40dedb3..9ab6dde 100644 (file)
@@ -65,12 +65,11 @@ static int inotify_merge(struct list_head *list,
 
 int inotify_handle_event(struct fsnotify_group *group,
                         struct inode *inode,
-                        struct fsnotify_mark *inode_mark,
-                        struct fsnotify_mark *vfsmount_mark,
                         u32 mask, const void *data, int data_type,
                         const unsigned char *file_name, u32 cookie,
                         struct fsnotify_iter_info *iter_info)
 {
+       struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
        struct inotify_inode_mark *i_mark;
        struct inotify_event_info *event;
        struct fsnotify_event *fsn_event;
@@ -78,7 +77,8 @@ int inotify_handle_event(struct fsnotify_group *group,
        int len = 0;
        int alloc_len = sizeof(struct inotify_event_info);
 
-       BUG_ON(vfsmount_mark);
+       if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
+               return 0;
 
        if ((inode_mark->mask & FS_EXCL_UNLINK) &&
            (data_type == FSNOTIFY_EVENT_PATH)) {
index ef32f36..1cf5b77 100644 (file)
@@ -485,10 +485,14 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
                                    struct fsnotify_group *group)
 {
        struct inotify_inode_mark *i_mark;
+       struct fsnotify_iter_info iter_info = { };
+
+       fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
+                                          fsn_mark);
 
        /* Queue ignore event for the watch */
-       inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
-                            NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL);
+       inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
+                            FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);
 
        i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
        /* remove this mark from the idr */
@@ -578,7 +582,7 @@ static int inotify_new_watch(struct fsnotify_group *group,
        }
 
        /* we are on the idr, now get on the inode */
-       ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, inode, NULL, 0);
+       ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
        if (ret) {
                /* we failed to get on the inode, get off the idr */
                inotify_remove_from_idr(group, tmp_i_mark);
index e9191b4..61f4c5f 100644 (file)
@@ -119,9 +119,9 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
                if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
                        new_mask |= mark->mask;
        }
-       if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
                conn->inode->i_fsnotify_mask = new_mask;
-       else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+       else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
                real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask;
 }
 
@@ -139,7 +139,7 @@ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
        spin_lock(&conn->lock);
        __fsnotify_recalc_mask(conn);
        spin_unlock(&conn->lock);
-       if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
                __fsnotify_update_child_dentry_flags(conn->inode);
 }
 
@@ -166,18 +166,18 @@ static struct inode *fsnotify_detach_connector_from_object(
 {
        struct inode *inode = NULL;
 
-       if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) {
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
                inode = conn->inode;
                rcu_assign_pointer(inode->i_fsnotify_marks, NULL);
                inode->i_fsnotify_mask = 0;
                conn->inode = NULL;
-               conn->flags &= ~FSNOTIFY_OBJ_TYPE_INODE;
-       } else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
+               conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
+       } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
                rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks,
                                   NULL);
                real_mount(conn->mnt)->mnt_fsnotify_mask = 0;
                conn->mnt = NULL;
-               conn->flags &= ~FSNOTIFY_OBJ_TYPE_VFSMOUNT;
+               conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
        }
 
        return inode;
@@ -294,12 +294,12 @@ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
 
 bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
 {
-       /* This can fail if mark is being removed */
-       if (!fsnotify_get_mark_safe(iter_info->inode_mark))
-               return false;
-       if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) {
-               fsnotify_put_mark_wake(iter_info->inode_mark);
-               return false;
+       int type;
+
+       fsnotify_foreach_obj_type(type) {
+               /* This can fail if mark is being removed */
+               if (!fsnotify_get_mark_safe(iter_info->marks[type]))
+                       goto fail;
        }
 
        /*
@@ -310,13 +310,20 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
        srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
 
        return true;
+
+fail:
+       for (type--; type >= 0; type--)
+               fsnotify_put_mark_wake(iter_info->marks[type]);
+       return false;
 }
 
 void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
 {
+       int type;
+
        iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
-       fsnotify_put_mark_wake(iter_info->inode_mark);
-       fsnotify_put_mark_wake(iter_info->vfsmount_mark);
+       fsnotify_foreach_obj_type(type)
+               fsnotify_put_mark_wake(iter_info->marks[type]);
 }
 
 /*
@@ -442,10 +449,10 @@ static int fsnotify_attach_connector_to_object(
        spin_lock_init(&conn->lock);
        INIT_HLIST_HEAD(&conn->list);
        if (inode) {
-               conn->flags = FSNOTIFY_OBJ_TYPE_INODE;
+               conn->type = FSNOTIFY_OBJ_TYPE_INODE;
                conn->inode = igrab(inode);
        } else {
-               conn->flags = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
+               conn->type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
                conn->mnt = mnt;
        }
        /*
@@ -479,8 +486,7 @@ static struct fsnotify_mark_connector *fsnotify_grab_connector(
        if (!conn)
                goto out;
        spin_lock(&conn->lock);
-       if (!(conn->flags & (FSNOTIFY_OBJ_TYPE_INODE |
-                            FSNOTIFY_OBJ_TYPE_VFSMOUNT))) {
+       if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) {
                spin_unlock(&conn->lock);
                srcu_read_unlock(&fsnotify_mark_srcu, idx);
                return NULL;
@@ -646,16 +652,16 @@ struct fsnotify_mark *fsnotify_find_mark(
        return NULL;
 }
 
-/* Clear any marks in a group with given type */
+/* Clear any marks in a group with given type mask */
 void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
-                                  unsigned int type)
+                                  unsigned int type_mask)
 {
        struct fsnotify_mark *lmark, *mark;
        LIST_HEAD(to_free);
        struct list_head *head = &to_free;
 
        /* Skip selection step if we want to clear all marks. */
-       if (type == FSNOTIFY_OBJ_ALL_TYPES) {
+       if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
                head = &group->marks_list;
                goto clear;
        }
@@ -670,7 +676,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
         */
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-               if (mark->connector->flags & type)
+               if ((1U << mark->connector->type) & type_mask)
                        list_move(&mark->g_list, &to_free);
        }
        mutex_unlock(&group->mark_mutex);
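
The fail path added to fsnotify_prepare_user_wait() above is the usual
partial-cleanup idiom: on the first mark that cannot be pinned, walk back over
the types already processed and drop only those references. The same pattern
in isolation, with hypothetical get()/put() helpers:

    /* Acquire one resource per slot; on failure, release only what we took. */
    static int acquire_all(int n, int (*get)(int), void (*put)(int))
    {
            int i;

            for (i = 0; i < n; i++)
                    if (!get(i))
                            goto fail;
            return 1;

    fail:
            for (i--; i >= 0; i--)  /* only the slots below the failing index */
                    put(i);
            return 0;
    }

In the fsnotify version the loops run over every type, including empty slots,
so fsnotify_get_mark_safe() and fsnotify_put_mark_wake() must tolerate NULL
marks.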
index 079a465..dd28079 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Documentation/ABI/stable/orangefs-sysfs:
+ * Documentation/ABI/stable/sysfs-fs-orangefs:
  *
  * What:               /sys/fs/orangefs/perf_counter_reset
  * Date:               June 2015
index b657294..aaffc0c 100644 (file)
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        if (env_start != arg_end || env_start >= env_end)
                env_start = env_end = arg_end;
 
+       /* .. and limit it to a maximum of one page of slop */
+       if (env_end >= arg_end + PAGE_SIZE)
+               env_end = arg_end + PAGE_SIZE - 1;
+
        /* We're not going to care if "*ppos" has high bits set */
        pos = arg_start + *ppos;
 
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
+               long offset;
 
-               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
-               if (got <= 0)
+               /*
+                * Are we already starting past the official end?
+                * We always include the last byte that is *supposed*
+                * to be NUL
+                */
+               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+               if (got <= offset)
                        break;
+               got -= offset;
 
                /* Don't walk past a NUL character once you hit arg_end */
                if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                                n = arg_end - pos - 1;
 
                        /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, got-n);
-                       if (!got)
+                       got = n + strnlen(page+n, offset+got-n);
+                       if (got < offset)
                                break;
+                       got -= offset;
+
+                       /* Include the NUL if it existed */
+                       if (got < size)
+                               got++;
                }
 
-               got -= copy_to_user(buf, page, got);
+               got -= copy_to_user(buf, page+offset, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
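
The offset arithmetic above is easiest to follow with concrete numbers; a
hypothetical walk-through (addresses are illustrative only):

    /* Suppose arg_end = 0x1000 and the reader resumes at pos = 0x1005. */
    unsigned long arg_end = 0x1000, pos = 0x1005;
    long offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;   /* offset = 6 */
    /*
     * The read is then issued at pos - offset = 0xfff, anchored at the byte
     * that is *supposed* to hold the terminating NUL.  If that byte is no
     * longer readable, access_remote_vm() returns got <= offset and the
     * loop breaks, so together with the one-page cap on env_end at most a
     * page of NUL-terminated "slop" past arg_end can ever be copied out.
     */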
index 2480469..e0a9c23 100644 (file)
@@ -1,6 +1,6 @@
 /* Asymmetric public-key cryptography key subtype
  *
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index b382407..1cb77cd 100644 (file)
@@ -1,6 +1,6 @@
 /* Asymmetric Public-key cryptography key type interface
  *
- * See Documentation/security/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.txt
  *
  * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index a89df3b..65e3832 100644 (file)
@@ -1,6 +1,6 @@
 /* Generic associative array implementation.
  *
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
  *
  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index 711275e..a00a065 100644 (file)
@@ -1,6 +1,6 @@
 /* Private definitions for the generic associative array implementation.
  *
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
  *
  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index fb35517..e3147eb 100644 (file)
@@ -281,8 +281,6 @@ void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
-               int (reinit_request)(void *, struct request *));
 
 int blk_mq_map_queues(struct blk_mq_tag_set *set);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
index bca3a92..9154570 100644 (file)
@@ -127,6 +127,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ZONE_WRITE_LOCKED  ((__force req_flags_t)(1 << 19))
 /* already slept for hybrid poll */
 #define RQF_MQ_POLL_SLEPT      ((__force req_flags_t)(1 << 20))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT          ((__force req_flags_t)(1 << 21))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
@@ -560,7 +562,6 @@ struct request_queue {
        unsigned int            dma_alignment;
 
        struct blk_queue_tag    *queue_tags;
-       struct list_head        tag_busy_list;
 
        unsigned int            nr_sorted;
        unsigned int            in_flight[2];
@@ -1373,7 +1374,6 @@ extern void blk_queue_end_tag(struct request_queue *, struct request *);
 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
 extern void blk_queue_free_tags(struct request_queue *);
 extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int, int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
index 7cf262a..b3233e8 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * See Documentation/circular-buffers.txt for more information.
+ * See Documentation/core-api/circular-buffers.rst for more information.
  */
 
 #ifndef _LINUX_CIRC_BUF_H
index b67bf6a..3c5a4cb 100644 (file)
@@ -48,7 +48,7 @@
  *   CMA should not be used by the device drivers directly. It is
  *   only a helper framework for dma-mapping subsystem.
  *
- *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ *   For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
index e64c029..b38964a 100644 (file)
@@ -98,8 +98,6 @@ struct fsnotify_iter_info;
 struct fsnotify_ops {
        int (*handle_event)(struct fsnotify_group *group,
                            struct inode *inode,
-                           struct fsnotify_mark *inode_mark,
-                           struct fsnotify_mark *vfsmount_mark,
                            u32 mask, const void *data, int data_type,
                            const unsigned char *file_name, u32 cookie,
                            struct fsnotify_iter_info *iter_info);
@@ -201,6 +199,57 @@ struct fsnotify_group {
 #define FSNOTIFY_EVENT_PATH    1
 #define FSNOTIFY_EVENT_INODE   2
 
+enum fsnotify_obj_type {
+       FSNOTIFY_OBJ_TYPE_INODE,
+       FSNOTIFY_OBJ_TYPE_VFSMOUNT,
+       FSNOTIFY_OBJ_TYPE_COUNT,
+       FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
+};
+
+#define FSNOTIFY_OBJ_TYPE_INODE_FL     (1U << FSNOTIFY_OBJ_TYPE_INODE)
+#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL  (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+#define FSNOTIFY_OBJ_ALL_TYPES_MASK    ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
+
+struct fsnotify_iter_info {
+       struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+       unsigned int report_mask;
+       int srcu_idx;
+};
+
+static inline bool fsnotify_iter_should_report_type(
+               struct fsnotify_iter_info *iter_info, int type)
+{
+       return (iter_info->report_mask & (1U << type));
+}
+
+static inline void fsnotify_iter_set_report_type(
+               struct fsnotify_iter_info *iter_info, int type)
+{
+       iter_info->report_mask |= (1U << type);
+}
+
+static inline void fsnotify_iter_set_report_type_mark(
+               struct fsnotify_iter_info *iter_info, int type,
+               struct fsnotify_mark *mark)
+{
+       iter_info->marks[type] = mark;
+       iter_info->report_mask |= (1U << type);
+}
+
+#define FSNOTIFY_ITER_FUNCS(name, NAME) \
+static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
+               struct fsnotify_iter_info *iter_info) \
+{ \
+       return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
+               iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+}
+
+FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
+
+#define fsnotify_foreach_obj_type(type) \
+       for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
+
 /*
  * Inode / vfsmount point to this structure which tracks all marks attached to
  * the inode / vfsmount. The reference to inode / vfsmount is held by this
@@ -209,11 +258,7 @@ struct fsnotify_group {
  */
 struct fsnotify_mark_connector {
        spinlock_t lock;
-#define FSNOTIFY_OBJ_TYPE_INODE                0x01
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT     0x02
-#define FSNOTIFY_OBJ_ALL_TYPES         (FSNOTIFY_OBJ_TYPE_INODE | \
-                                        FSNOTIFY_OBJ_TYPE_VFSMOUNT)
-       unsigned int flags;     /* Type of object [lock] */
+       unsigned int type;      /* Type of object [lock] */
        union { /* Object pointer [lock] */
                struct inode *inode;
                struct vfsmount *mnt;
@@ -356,7 +401,21 @@ extern struct fsnotify_mark *fsnotify_find_mark(
 extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
                             struct vfsmount *mnt, int allow_dups);
 extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
-                                   struct inode *inode, struct vfsmount *mnt, int allow_dups);
+                                   struct inode *inode, struct vfsmount *mnt,
+                                   int allow_dups);
+/* attach the mark to the inode */
+static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+                                         struct inode *inode,
+                                         int allow_dups)
+{
+       return fsnotify_add_mark(mark, inode, NULL, allow_dups);
+}
+static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
+                                                struct inode *inode,
+                                                int allow_dups)
+{
+       return fsnotify_add_mark_locked(mark, inode, NULL, allow_dups);
+}
 /* given a group and a mark, flag mark to be freed when all references are dropped */
 extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
                                  struct fsnotify_group *group);
@@ -369,12 +428,12 @@ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned
 /* run all the marks in a group, and clear all of the vfsmount marks */
 static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
 {
-       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
+       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
 }
 /* run all the marks in a group, and clear all of the inode marks */
 static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
 {
-       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
+       fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
 }
 extern void fsnotify_get_mark(struct fsnotify_mark *mark);
 extern void fsnotify_put_mark(struct fsnotify_mark *mark);
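
For reference, the accessor generated by FSNOTIFY_ITER_FUNCS(inode, INODE)
above expands to:

    static inline struct fsnotify_mark *fsnotify_iter_inode_mark(
                    struct fsnotify_iter_info *iter_info)
    {
            return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_INODE_FL) ?
                    iter_info->marks[FSNOTIFY_OBJ_TYPE_INODE] : NULL;
    }

so handlers such as inotify_handle_event() can ask for "the inode mark of this
iteration step" without knowing anything about the report mask.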
index 9c3c9a3..8154f49 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Ftrace header.  For implementation details beyond the random comments
- * scattered below, see: Documentation/trace/ftrace-design.txt
+ * scattered below, see: Documentation/trace/ftrace-design.rst
  */
 
 #ifndef _LINUX_FTRACE_H
index 2014bd1..96a71a6 100644 (file)
@@ -501,6 +501,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_SKU,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
index 997b066..18602ca 100644 (file)
@@ -7,7 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * For further information, see the Documentation/spi/sc18is602 file.
+ * For further information, see the Documentation/spi/spi-sc18is602 file.
  */
 
 /**
index 7c686d3..ee495d7 100644 (file)
@@ -18,9 +18,6 @@
 
 #include <drm/drm_mode.h>
 
-struct sh_mobile_meram_cfg;
-struct sh_mobile_meram_info;
-
 enum shmob_drm_clk_source {
        SHMOB_DRM_CLK_BUS,
        SHMOB_DRM_CLK_PERIPHERAL,
@@ -93,7 +90,6 @@ struct shmob_drm_platform_data {
        struct shmob_drm_interface_data iface;
        struct shmob_drm_panel_data panel;
        struct shmob_drm_backlight_data backlight;
-       const struct sh_mobile_meram_cfg *meram;
 };
 
 #endif /* __SHMOB_DRM_H__ */
index e4b257f..bc8206a 100644 (file)
@@ -109,7 +109,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  *
  * The barrier() is needed to make sure the compiler doesn't cache the first element [1],
  * as this loop can be restarted [2]
- * [1] Documentation/atomic_ops.txt around line 114
+ * [1] Documentation/core-api/atomic_ops.rst around line 114
  * [2] Documentation/RCU/rculist_nulls.txt around line 146
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)                        \
index c94f466..19a690b 100644 (file)
@@ -4,7 +4,7 @@
 /*
  * Kernel Tracepoint API.
  *
- * See Documentation/trace/tracepoints.txt.
+ * See Documentation/trace/tracepoints.rst.
  *
  * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
index db9f15f..c0d7ea0 100644 (file)
@@ -170,7 +170,7 @@ struct prctl_mm_map {
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
  *
- * See Documentation/prctl/no_new_privs.txt for more details.
+ * See Documentation/userspace-api/no_new_privs.rst for more details.
  */
 #define PR_SET_NO_NEW_PRIVS    38
 #define PR_GET_NO_NEW_PRIVS    39
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
deleted file mode 100644 (file)
index ac329ee..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Definitions for AUO-K190X framebuffer drivers
- *
- * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
-#define _LINUX_VIDEO_AUO_K190XFB_H_
-
-/* Controller standby command needs a param */
-#define AUOK190X_QUIRK_STANDBYPARAM    (1 << 0)
-
-/* Controller standby is completely broken */
-#define AUOK190X_QUIRK_STANDBYBROKEN   (1 << 1)
-
-/*
- * Resolutions for the displays
- */
-#define AUOK190X_RESOLUTION_800_600            0
-#define AUOK190X_RESOLUTION_1024_768           1
-#define AUOK190X_RESOLUTION_600_800            4
-#define AUOK190X_RESOLUTION_768_1024           5
-
-/*
- * struct used by auok190x. board specific stuff comes from *board
- */
-struct auok190xfb_par {
-       struct fb_info *info;
-       struct auok190x_board *board;
-
-       struct regulator *regulator;
-
-       struct mutex io_lock;
-       struct delayed_work work;
-       wait_queue_head_t waitq;
-       int resolution;
-       int rotation;
-       int consecutive_threshold;
-       int update_cnt;
-
-       /* panel and controller informations */
-       int epd_type;
-       int panel_size_int;
-       int panel_size_float;
-       int panel_model;
-       int tcon_version;
-       int lut_version;
-
-       /* individual controller callbacks */
-       void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
-       void (*update_all)(struct auok190xfb_par *par);
-       bool (*need_refresh)(struct auok190xfb_par *par);
-       void (*init)(struct auok190xfb_par *par);
-       void (*recover)(struct auok190xfb_par *par);
-
-       int update_mode; /* mode to use for updates */
-       int last_mode; /* update mode last used */
-       int flash;
-
-       /* power management */
-       int autosuspend_delay;
-       bool standby;
-       bool manual_standby;
-};
-
-/**
- * Board specific platform-data
- * @init:              initialize the controller interface
- * @cleanup:           cleanup the controller interface
- * @wait_for_rdy:      wait until the controller is not busy anymore
- * @set_ctl:           change an interface control
- * @set_hdb:           write a value to the data register
- * @get_hdb:           read a value from the data register
- * @setup_irq:         method to setup the irq handling on the busy gpio
- * @gpio_nsleep:       sleep gpio
- * @gpio_nrst:         reset gpio
- * @gpio_nbusy:                busy gpio
- * @resolution:                one of the AUOK190X_RESOLUTION constants
- * @rotation:          rotation of the framebuffer
- * @quirks:            controller quirks to honor
- * @fps:               frames per second for defio
- */
-struct auok190x_board {
-       int (*init)(struct auok190xfb_par *);
-       void (*cleanup)(struct auok190xfb_par *);
-       int (*wait_for_rdy)(struct auok190xfb_par *);
-
-       void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
-       void (*set_hdb)(struct auok190xfb_par *, u16);
-       u16 (*get_hdb)(struct auok190xfb_par *);
-
-       int (*setup_irq)(struct fb_info *);
-
-       int gpio_nsleep;
-       int gpio_nrst;
-       int gpio_nbusy;
-
-       int resolution;
-       int quirks;
-       int fps;
-};
-
-#endif
index f706b0f..84aa976 100644 (file)
@@ -3,7 +3,6 @@
 #define __ASM_SH_MOBILE_LCDC_H__
 
 #include <linux/fb.h>
-#include <video/sh_mobile_meram.h>
 
 /* Register definitions */
 #define _LDDCKR                        0x410
@@ -184,7 +183,6 @@ struct sh_mobile_lcdc_chan_cfg {
        struct sh_mobile_lcdc_panel_cfg panel_cfg;
        struct sh_mobile_lcdc_bl_info bl_info;
        struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
-       const struct sh_mobile_meram_cfg *meram_cfg;
 
        struct platform_device *tx_dev; /* HDMI/DSI transmitter device */
 };
@@ -193,7 +191,6 @@ struct sh_mobile_lcdc_info {
        int clock_source;
        struct sh_mobile_lcdc_chan_cfg ch[2];
        struct sh_mobile_lcdc_overlay_cfg overlays[4];
-       struct sh_mobile_meram_info *meram_dev;
 };
 
 #endif /* __ASM_SH_MOBILE_LCDC_H__ */
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h
deleted file mode 100644 (file)
index f4efc21..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __VIDEO_SH_MOBILE_MERAM_H__
-#define __VIDEO_SH_MOBILE_MERAM_H__
-
-/* For sh_mobile_meram_info.addr_mode */
-enum {
-       SH_MOBILE_MERAM_MODE0 = 0,
-       SH_MOBILE_MERAM_MODE1
-};
-
-enum {
-       SH_MOBILE_MERAM_PF_NV = 0,
-       SH_MOBILE_MERAM_PF_RGB,
-       SH_MOBILE_MERAM_PF_NV24
-};
-
-
-struct sh_mobile_meram_priv;
-
-/*
- * struct sh_mobile_meram_info - MERAM platform data
- * @reserved_icbs: Bitmask of reserved ICBs (for instance used through UIO)
- */
-struct sh_mobile_meram_info {
-       int                             addr_mode;
-       u32                             reserved_icbs;
-       struct sh_mobile_meram_priv     *priv;
-       struct platform_device          *pdev;
-};
-
-/* icb config */
-struct sh_mobile_meram_icb_cfg {
-       unsigned int meram_size;        /* MERAM Buffer Size to use */
-};
-
-struct sh_mobile_meram_cfg {
-       struct sh_mobile_meram_icb_cfg icb[2];
-};
-
-#if defined(CONFIG_FB_SH_MOBILE_MERAM) || \
-    defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE)
-unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev,
-                                   size_t size);
-void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
-                         unsigned long mem, size_t size);
-void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
-                                 const struct sh_mobile_meram_cfg *cfg,
-                                 unsigned int xres, unsigned int yres,
-                                 unsigned int pixelformat,
-                                 unsigned int *pitch);
-void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data);
-void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
-                                 unsigned long base_addr_y,
-                                 unsigned long base_addr_c,
-                                 unsigned long *icb_addr_y,
-                                 unsigned long *icb_addr_c);
-#else
-static inline unsigned long
-sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size)
-{
-       return 0;
-}
-
-static inline void
-sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
-                    unsigned long mem, size_t size)
-{
-}
-
-static inline void *
-sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
-                           const struct sh_mobile_meram_cfg *cfg,
-                           unsigned int xres, unsigned int yres,
-                           unsigned int pixelformat,
-                           unsigned int *pitch)
-{
-       return ERR_PTR(-ENODEV);
-}
-
-static inline void
-sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data)
-{
-}
-
-static inline void
-sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
-                            unsigned long base_addr_y,
-                            unsigned long base_addr_c,
-                            unsigned long *icb_addr_y,
-                            unsigned long *icb_addr_c)
-{
-}
-#endif
-
-#endif /* __VIDEO_SH_MOBILE_MERAM_H__  */
index 2a9510a..e2340a4 100644 (file)
@@ -317,7 +317,7 @@ struct xenkbd_position {
  * Linux [2] and Windows [3] multi-touch support.
  *
  * [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
- * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
+ * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.rst
  * [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
  *
  *
index 5a52f07..fde3d09 100644 (file)
@@ -1719,10 +1719,6 @@ source "arch/Kconfig"
 
 endmenu                # General setup
 
-config HAVE_GENERIC_DMA_COHERENT
-       bool
-       default n
-
 config RT_MUTEXES
        bool
 
index d200162..04bc07c 100644 (file)
@@ -41,6 +41,7 @@ obj-y += printk/
 obj-y += irq/
 obj-y += rcu/
 obj-y += livepatch/
+obj-y += dma/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
index 52f368b..fba7804 100644 (file)
@@ -109,7 +109,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
        audit_update_mark(audit_mark, dentry->d_inode);
        audit_mark->rule = krule;
 
-       ret = fsnotify_add_mark(&audit_mark->mark, inode, NULL, true);
+       ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
        if (ret < 0) {
                fsnotify_put_mark(&audit_mark->mark);
                audit_mark = ERR_PTR(ret);
@@ -165,12 +165,11 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark)
 /* Update mark data in audit rules based on fsnotify events. */
 static int audit_mark_handle_event(struct fsnotify_group *group,
                                    struct inode *to_tell,
-                                   struct fsnotify_mark *inode_mark,
-                                   struct fsnotify_mark *vfsmount_mark,
                                    u32 mask, const void *data, int data_type,
                                    const unsigned char *dname, u32 cookie,
                                    struct fsnotify_iter_info *iter_info)
 {
+       struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
        struct audit_fsnotify_mark *audit_mark;
        const struct inode *inode = NULL;
 
index 67e6956..c99ebaa 100644 (file)
@@ -288,8 +288,8 @@ static void untag_chunk(struct node *p)
        if (!new)
                goto Fallback;
 
-       if (fsnotify_add_mark_locked(&new->mark, entry->connector->inode,
-                                    NULL, 1)) {
+       if (fsnotify_add_inode_mark_locked(&new->mark, entry->connector->inode,
+                                          1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }
@@ -354,7 +354,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
                return -ENOMEM;
 
        entry = &chunk->mark;
-       if (fsnotify_add_mark(entry, inode, NULL, 0)) {
+       if (fsnotify_add_inode_mark(entry, inode, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }
@@ -434,8 +434,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
                return -ENOENT;
        }
 
-       if (fsnotify_add_mark_locked(chunk_entry,
-                            old_entry->connector->inode, NULL, 1)) {
+       if (fsnotify_add_inode_mark_locked(chunk_entry,
+                            old_entry->connector->inode, 1)) {
                spin_unlock(&old_entry->lock);
                mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(chunk_entry);
@@ -989,8 +989,6 @@ static void evict_chunk(struct audit_chunk *chunk)
 
 static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct inode *to_tell,
-                                  struct fsnotify_mark *inode_mark,
-                                  struct fsnotify_mark *vfsmount_mark,
                                   u32 mask, const void *data, int data_type,
                                   const unsigned char *file_name, u32 cookie,
                                   struct fsnotify_iter_info *iter_info)
index f1ba889..c17c0c2 100644 (file)
@@ -160,7 +160,7 @@ static struct audit_parent *audit_init_parent(struct path *path)
 
        fsnotify_init_mark(&parent->mark, audit_watch_group);
        parent->mark.mask = AUDIT_FS_WATCH;
-       ret = fsnotify_add_mark(&parent->mark, inode, NULL, 0);
+       ret = fsnotify_add_inode_mark(&parent->mark, inode, 0);
        if (ret < 0) {
                audit_free_parent(parent);
                return ERR_PTR(ret);
@@ -472,12 +472,11 @@ void audit_remove_watch_rule(struct audit_krule *krule)
 /* Update watch data in audit rules based on fsnotify events. */
 static int audit_watch_handle_event(struct fsnotify_group *group,
                                    struct inode *to_tell,
-                                   struct fsnotify_mark *inode_mark,
-                                   struct fsnotify_mark *vfsmount_mark,
                                    u32 mask, const void *data, int data_type,
                                    const unsigned char *dname, u32 cookie,
                                    struct fsnotify_iter_info *iter_info)
 {
+       struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
        const struct inode *inode;
        struct audit_parent *parent;
 
index d8b12e0..266f10c 100644 (file)
@@ -605,7 +605,7 @@ static inline int nr_cpusets(void)
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644 (file)
index 0000000..9bd5430
--- /dev/null
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+       bool
+       depends on !NO_DMA
+       default y
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config NEED_DMA_MAP_STATE
+       bool
+
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+       bool
+       select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+       bool
+       depends on HAS_DMA
+       select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+       bool
+       depends on HAS_DMA
+
+config SWIOTLB
+       bool
+       select DMA_DIRECT_OPS
+       select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644 (file)
index 0000000..6de44e4
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA)                  += mapping.o
+obj-$(CONFIG_DMA_CMA)                  += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
+obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
+obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
+
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
new file mode 100644 (file)
index 0000000..597d408
--- /dev/null
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+       void            *virt_base;
+       dma_addr_t      device_base;
+       unsigned long   pfn_base;
+       int             size;
+       int             flags;
+       unsigned long   *bitmap;
+       spinlock_t      spinlock;
+       bool            use_dev_dma_pfn_offset;
+};
+
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
+{
+       if (dev && dev->dma_mem)
+               return dev->dma_mem;
+       return NULL;
+}
+
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+                                            struct dma_coherent_mem * mem)
+{
+       if (mem->use_dev_dma_pfn_offset)
+               return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
+       else
+               return mem->device_base;
+}
+
+static int dma_init_coherent_memory(
+       phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
+       struct dma_coherent_mem **mem)
+{
+       struct dma_coherent_mem *dma_mem = NULL;
+       void __iomem *mem_base = NULL;
+       int pages = size >> PAGE_SHIFT;
+       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+       int ret;
+
+       if (!size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+       if (!mem_base) {
+               ret = -EINVAL;
+               goto out;
+       }
+       dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+       if (!dma_mem) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!dma_mem->bitmap) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       dma_mem->virt_base = mem_base;
+       dma_mem->device_base = device_addr;
+       dma_mem->pfn_base = PFN_DOWN(phys_addr);
+       dma_mem->size = pages;
+       dma_mem->flags = flags;
+       spin_lock_init(&dma_mem->spinlock);
+
+       *mem = dma_mem;
+       return 0;
+
+out:
+       kfree(dma_mem);
+       if (mem_base)
+               memunmap(mem_base);
+       return ret;
+}
+
+static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+{
+       if (!mem)
+               return;
+
+       memunmap(mem->virt_base);
+       kfree(mem->bitmap);
+       kfree(mem);
+}
+
+static int dma_assign_coherent_memory(struct device *dev,
+                                     struct dma_coherent_mem *mem)
+{
+       if (!dev)
+               return -ENODEV;
+
+       if (dev->dma_mem)
+               return -EBUSY;
+
+       dev->dma_mem = mem;
+       return 0;
+}
+
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                               dma_addr_t device_addr, size_t size, int flags)
+{
+       struct dma_coherent_mem *mem;
+       int ret;
+
+       ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+       if (ret)
+               return ret;
+
+       ret = dma_assign_coherent_memory(dev, mem);
+       if (ret)
+               dma_release_coherent_memory(mem);
+       return ret;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+
+       if (!mem)
+               return;
+       dma_release_coherent_memory(mem);
+       dev->dma_mem = NULL;
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                       dma_addr_t device_addr, size_t size)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+       unsigned long flags;
+       int pos, err;
+
+       size += device_addr & ~PAGE_MASK;
+
+       if (!mem)
+               return ERR_PTR(-EINVAL);
+
+       spin_lock_irqsave(&mem->spinlock, flags);
+       pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
+       err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+
+       if (err != 0)
+               return ERR_PTR(err);
+       return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+               ssize_t size, dma_addr_t *dma_handle)
+{
+       int order = get_order(size);
+       unsigned long flags;
+       int pageno;
+       void *ret;
+
+       spin_lock_irqsave(&mem->spinlock, flags);
+
+       if (unlikely(size > (mem->size << PAGE_SHIFT)))
+               goto err;
+
+       pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+       if (unlikely(pageno < 0))
+               goto err;
+
+       /*
+        * Memory was found in the coherent area.
+        */
+       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+       ret = mem->virt_base + (pageno << PAGE_SHIFT);
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+       memset(ret, 0, size);
+       return ret;
+err:
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+       return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:       device from which we allocate memory
+ * @size:      size of requested memory area
+ * @dma_handle:        This will be filled with the correct dma handle
+ * @ret:       This pointer will be filled with the virtual address
+ *             of the allocated area.
+ *
+ * This function should only be called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+               dma_addr_t *dma_handle, void **ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       if (!mem)
+               return 0;
+
+       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+       if (*ret)
+               return 1;
+
+       /*
+        * In the case where the allocation cannot be satisfied from the
+        * per-device area, try to fall back to generic memory if the
+        * constraints allow it.
+        */
+       return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+{
+       if (!dma_coherent_default_memory)
+               return NULL;
+
+       return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+                       dma_handle);
+}
+
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+                                      int order, void *vaddr)
+{
+       if (mem && vaddr >= mem->virt_base && vaddr <
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               unsigned long flags;
+
+               spin_lock_irqsave(&mem->spinlock, flags);
+               bitmap_release_region(mem->bitmap, page, order);
+               spin_unlock_irqrestore(&mem->spinlock, flags);
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
+ * @dev:       device from which the memory was allocated
+ * @order:     the order of pages allocated
+ * @vaddr:     virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
+ */
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_release_from_coherent(dma_coherent_default_memory, order,
+                       vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+               struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
+       if (mem && vaddr >= mem->virt_base && vaddr + size <=
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               unsigned long off = vma->vm_pgoff;
+               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               int user_count = vma_pages(vma);
+               int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+               *ret = -ENXIO;
+               if (off < count && user_count <= count - off) {
+                       unsigned long pfn = mem->pfn_base + start + off;
+                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                                              user_count << PAGE_SHIFT,
+                                              vma->vm_page_prot);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:       device from which the memory was allocated
+ * @vma:       vm_area for the userspace memory
+ * @vaddr:     cpu address returned by dma_alloc_from_dev_coherent
+ * @size:      size of the memory buffer allocated
+ * @ret:       result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+                          void *vaddr, size_t size, int *ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+                                  size_t size, int *ret)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+                                       vaddr, size, ret);
+}
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static struct reserved_mem *dma_reserved_default_memory __initdata;
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+       struct dma_coherent_mem *mem = rmem->priv;
+       int ret;
+
+       if (!mem) {
+               ret = dma_init_coherent_memory(rmem->base, rmem->base,
+                                              rmem->size,
+                                              DMA_MEMORY_EXCLUSIVE, &mem);
+               if (ret) {
+                       pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+                       return ret;
+               }
+       }
+       mem->use_dev_dma_pfn_offset = true;
+       rmem->priv = mem;
+       dma_assign_coherent_memory(dev, mem);
+       return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+                                   struct device *dev)
+{
+       if (dev)
+               dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+       .device_init    = rmem_dma_device_init,
+       .device_release = rmem_dma_device_release,
+};
+
+static int __init rmem_dma_setup(struct reserved_mem *rmem)
+{
+       unsigned long node = rmem->fdt_node;
+
+       if (of_get_flat_dt_prop(node, "reusable", NULL))
+               return -EINVAL;
+
+#ifdef CONFIG_ARM
+       if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
+               pr_err("Reserved memory: regions without no-map are not yet supported\n");
+               return -EINVAL;
+       }
+
+       if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
+               WARN(dma_reserved_default_memory,
+                    "Reserved memory: region for default DMA coherent area is redefined\n");
+               dma_reserved_default_memory = rmem;
+       }
+#endif
+
+       rmem->ops = &rmem_dma_ops;
+       pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
+               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+       return 0;
+}
+
+static int __init dma_init_reserved_memory(void)
+{
+       const struct reserved_mem_ops *ops;
+       int ret;
+
+       if (!dma_reserved_default_memory)
+               return -ENOMEM;
+
+       ops = dma_reserved_default_memory->ops;
+
+       /*
+        * We rely on rmem_dma_device_init() not propagating the error of
+        * dma_assign_coherent_memory() for a NULL device.
+        */
+       ret = ops->device_init(dma_reserved_default_memory, NULL);
+
+       if (!ret) {
+               dma_coherent_default_memory = dma_reserved_default_memory->priv;
+               pr_info("DMA: default coherent area is set\n");
+       }
+
+       return ret;
+}
+
+core_initcall(dma_init_reserved_memory);
+
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+#endif
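
Per the contract documented on dma_alloc_from_dev_coherent() above, an arch
allocator tries the per-device pool first and falls back to a generic
allocator only when told to. A sketch of that caller shape (arch_dma_alloc()
and its fallback are hypothetical):

    /* Hypothetical arch-side allocator honoring the pool contract above. */
    static void *arch_dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *dma_handle)
    {
            void *ret;

            /* Non-zero return: the answer is final, even if ret is NULL
             * (allocation refused by an exclusive pool). */
            if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
                    return ret;

            /* Zero return: continue with a generic allocator of choice. */
            return NULL;    /* placeholder for the generic fallback */
    }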
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
new file mode 100644 (file)
index 0000000..d987dcd
--- /dev/null
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/sizes.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+struct cma *dma_contiguous_default_area;
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
+
+static int __init early_cma(char *p)
+{
+       pr_debug("%s(%s)\n", __func__, p);
+       size_cmdline = memparse(p, &p);
+       if (*p != '@')
+               return 0;
+       base_cmdline = memparse(p + 1, &p);
+       if (*p != '-') {
+               limit_cmdline = base_cmdline + size_cmdline;
+               return 0;
+       }
+       limit_cmdline = memparse(p + 1, &p);
+
+       return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
+{
+       struct memblock_region *reg;
+       unsigned long total_pages = 0;
+
+       /*
+        * We cannot use memblock_phys_mem_size() here, because
+        * memblock_analyze() has not been called yet.
+        */
+       for_each_memblock(memory, reg)
+               total_pages += memblock_region_memory_end_pfn(reg) -
+                              memblock_region_memory_base_pfn(reg);
+
+       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
+{
+       return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+       phys_addr_t selected_size = 0;
+       phys_addr_t selected_base = 0;
+       phys_addr_t selected_limit = limit;
+       bool fixed = false;
+
+       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+       if (size_cmdline != -1) {
+               selected_size = size_cmdline;
+               selected_base = base_cmdline;
+               selected_limit = min_not_zero(limit_cmdline, limit);
+               if (base_cmdline + size_cmdline == limit_cmdline)
+                       fixed = true;
+       } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+               selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+               selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+               selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+               selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+       }
+
+       if (selected_size && !dma_contiguous_default_area) {
+               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+                        (unsigned long)selected_size / SZ_1M);
+
+               dma_contiguous_reserve_area(selected_size, selected_base,
+                                           selected_limit,
+                                           &dma_contiguous_default_area,
+                                           fixed);
+       }
+}
+
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows the creation of custom reserved areas for
+ * specific devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base.  If false,
+ * reserve in range from @base to @limit.
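+ *
+ * Example (illustrative, addresses hypothetical): reserve a fixed
+ * 16 MiB area at 0x20000000:
+ *     struct cma *cma;
+ *     dma_contiguous_reserve_area(SZ_16M, 0x20000000, 0, &cma, true);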
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                      phys_addr_t limit, struct cma **res_cma,
+                                      bool fixed)
+{
+       int ret;
+
+       ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+                                       "reserved", res_cma);
+       if (ret)
+               return ret;
+
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(cma_get_base(*res_cma),
+                               cma_get_size(*res_cma));
+
+       return 0;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP flags to use for this allocation.
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
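+ *
+ * Example (illustrative): allocate a 1 MiB, page-aligned buffer:
+ *     struct page *page = dma_alloc_from_contiguous(dev,
+ *                             SZ_1M >> PAGE_SHIFT, 0, GFP_KERNEL);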
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+                                      unsigned int align, gfp_t gfp_mask)
+{
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
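+ *
+ * Example (illustrative), releasing the buffer allocated above:
+ *     dma_release_from_contiguous(dev, page, SZ_1M >> PAGE_SHIFT);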
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       return cma_release(dev_get_cma_area(dev), pages, count);
+}
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+       dev_set_cma_area(dev, rmem->priv);
+       return 0;
+}
+
+static void rmem_cma_device_release(struct reserved_mem *rmem,
+                                   struct device *dev)
+{
+       dev_set_cma_area(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+       .device_init    = rmem_cma_device_init,
+       .device_release = rmem_cma_device_release,
+};
+
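+/*
+ * Example (illustrative) reserved-memory node matched by this handler;
+ * the node name, size and default flag are hypothetical:
+ *
+ *     linux,cma {
+ *             compatible = "shared-dma-pool";
+ *             reusable;
+ *             size = <0x4000000>;
+ *             linux,cma-default;
+ *     };
+ */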
+static int __init rmem_cma_setup(struct reserved_mem *rmem)
+{
+       phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+       phys_addr_t mask = align - 1;
+       unsigned long node = rmem->fdt_node;
+       struct cma *cma;
+       int err;
+
+       if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
+           of_get_flat_dt_prop(node, "no-map", NULL))
+               return -EINVAL;
+
+       if ((rmem->base & mask) || (rmem->size & mask)) {
+               pr_err("Reserved memory: incorrect alignment of CMA region\n");
+               return -EINVAL;
+       }
+
+       err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
+       if (err) {
+               pr_err("Reserved memory: unable to setup CMA region\n");
+               return err;
+       }
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(rmem->base, rmem->size);
+
+       if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+               dma_contiguous_set_default(cma);
+
+       rmem->ops = &rmem_cma_ops;
+       rmem->priv = cma;
+
+       pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
+               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+#endif
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
new file mode 100644 (file)
index 0000000..c007d25
--- /dev/null
@@ -0,0 +1,1773 @@
+/*
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/sched/task_stack.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched/task.h>
+#include <linux/stacktrace.h>
+#include <linux/dma-debug.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/sections.h>
+
+#define HASH_SIZE       1024ULL
+#define HASH_FN_SHIFT   13
+#define HASH_FN_MASK    (HASH_SIZE - 1)
+
+/* allow architectures to override this if absolutely required */
+#ifndef PREALLOC_DMA_DEBUG_ENTRIES
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+#endif
+
+enum {
+       dma_debug_single,
+       dma_debug_page,
+       dma_debug_sg,
+       dma_debug_coherent,
+       dma_debug_resource,
+};
+
+enum map_err_types {
+       MAP_ERR_CHECK_NOT_APPLICABLE,
+       MAP_ERR_NOT_CHECKED,
+       MAP_ERR_CHECKED,
+};
+
+#define DMA_DEBUG_STACKTRACE_ENTRIES 5
+
+/**
+ * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
+ * @list: node on pre-allocated free_entries list
+ * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @type: single, page, sg, coherent
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
+ * @size: length of the mapping
+ * @direction: enum dma_data_direction
+ * @sg_call_ents: 'nents' from dma_map_sg
+ * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @map_err_type: track whether dma_mapping_error() was checked
+ * @stacktrace: support backtraces when a violation is detected
+ */
+struct dma_debug_entry {
+       struct list_head list;
+       struct device    *dev;
+       int              type;
+       unsigned long    pfn;
+       size_t           offset;
+       u64              dev_addr;
+       u64              size;
+       int              direction;
+       int              sg_call_ents;
+       int              sg_mapped_ents;
+       enum map_err_types  map_err_type;
+#ifdef CONFIG_STACKTRACE
+       struct           stack_trace stacktrace;
+       unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+#endif
+};
+
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
+struct hash_bucket {
+       struct list_head list;
+       spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+/* Hash list to save the allocated dma addresses */
+static struct hash_bucket dma_entry_hash[HASH_SIZE];
+/* List of pre-allocated dma_debug_entry's */
+static LIST_HEAD(free_entries);
+/* Lock for the list above */
+static DEFINE_SPINLOCK(free_entries_lock);
+
+/* Global disable flag - will be set in case of an error */
+static bool global_disable __read_mostly;
+
+/* Early initialization disable flag, set at the end of dma_debug_init */
+static bool dma_debug_initialized __read_mostly;
+
+static inline bool dma_debug_disabled(void)
+{
+       return global_disable || !dma_debug_initialized;
+}
+
+/* Global error count */
+static u32 error_count;
+
+/* Global error show enable */
+static u32 show_all_errors __read_mostly;
+/* Number of errors to show */
+static u32 show_num_errors = 1;
+
+static u32 num_free_entries;
+static u32 min_free_entries;
+static u32 nr_total_entries;
+
+/* number of preallocated entries requested by kernel cmdline */
+static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
+
+/* debugfs dentry's for the stuff above */
+static struct dentry *dma_debug_dent        __read_mostly;
+static struct dentry *global_disable_dent   __read_mostly;
+static struct dentry *error_count_dent      __read_mostly;
+static struct dentry *show_all_errors_dent  __read_mostly;
+static struct dentry *show_num_errors_dent  __read_mostly;
+static struct dentry *num_free_entries_dent __read_mostly;
+static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN   64
+
+static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver                    __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
+
+static const char *const maperr2str[] = {
+       [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+       [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+       [MAP_ERR_CHECKED] = "dma map error checked",
+};
+
+static const char *type2name[5] = { "single", "page",
+                                   "scatter-gather", "coherent",
+                                   "resource" };
+
+static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
+                                  "DMA_FROM_DEVICE", "DMA_NONE" };
+
+/*
+ * The access to some variables in this macro is racy. We can't use atomic_t
+ * here because all these variables are exported to debugfs. Some of them are
+ * even writeable. This is also the reason why a lock won't help much. But
+ * anyway, the races are no big deal. Here is why:
+ *
+ *   error_count: the addition is racy, but the worst thing that can happen is
+ *                that we don't count some errors
+ *   show_num_errors: the subtraction is racy. Also no big deal because in
+ *                    worst case this will result in one warning more in the
+ *                    system log than the user configured. This variable is
+ *                    writeable via debugfs.
+ */
+static inline void dump_entry_trace(struct dma_debug_entry *entry)
+{
+#ifdef CONFIG_STACKTRACE
+       if (entry) {
+               pr_warning("Mapped at:\n");
+               print_stack_trace(&entry->stacktrace, 0);
+       }
+#endif
+}
+
+static bool driver_filter(struct device *dev)
+{
+       struct device_driver *drv;
+       unsigned long flags;
+       bool ret;
+
+       /* driver filter off */
+       if (likely(!current_driver_name[0]))
+               return true;
+
+       /* driver filter on and initialized */
+       if (current_driver && dev && dev->driver == current_driver)
+               return true;
+
+       /* driver filter on, but we can't filter on a NULL device... */
+       if (!dev)
+               return false;
+
+       if (current_driver || !current_driver_name[0])
+               return false;
+
+       /* driver filter on but not yet initialized */
+       drv = dev->driver;
+       if (!drv)
+               return false;
+
+       /* lock to protect against change of current_driver_name */
+       read_lock_irqsave(&driver_name_lock, flags);
+
+       ret = false;
+       if (drv->name &&
+           strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+               current_driver = drv;
+               ret = true;
+       }
+
+       read_unlock_irqrestore(&driver_name_lock, flags);
+
+       return ret;
+}
+
+#define err_printk(dev, entry, format, arg...) do {                    \
+               error_count += 1;                                       \
+               if (driver_filter(dev) &&                               \
+                   (show_all_errors || show_num_errors > 0)) {         \
+                       WARN(1, "%s %s: " format,                       \
+                            dev ? dev_driver_string(dev) : "NULL",     \
+                            dev ? dev_name(dev) : "NULL", ## arg);     \
+                       dump_entry_trace(entry);                        \
+               }                                                       \
+               if (!show_all_errors && show_num_errors > 0)            \
+                       show_num_errors -= 1;                           \
+       } while (0)
+
+/*
+ * Hash related functions
+ *
+ * Every DMA-API request is saved into a struct dma_debug_entry. To
+ * have quick access to these structs, they are stored in a hash table.
+ */
+static int hash_fn(struct dma_debug_entry *entry)
+{
+       /*
+        * The hash function is based on the dma address. With HASH_FN_SHIFT
+        * of 13 and a 1024-entry table, we use bits 13-22 as the index.
+        */
+       return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
+}
+
+/*
+ * Request exclusive access to a hash bucket for a given dma_debug_entry.
+ */
+static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
+                                          unsigned long *flags)
+       __acquires(&dma_entry_hash[idx].lock)
+{
+       int idx = hash_fn(entry);
+       unsigned long __flags;
+
+       spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
+       *flags = __flags;
+       return &dma_entry_hash[idx];
+}
+
+/*
+ * Give up exclusive access to the hash bucket
+ */
+static void put_hash_bucket(struct hash_bucket *bucket,
+                           unsigned long *flags)
+       __releases(&bucket->lock)
+{
+       unsigned long __flags = *flags;
+
+       spin_unlock_irqrestore(&bucket->lock, __flags);
+}
+
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+       return ((a->dev_addr == b->dev_addr) &&
+               (a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+                            struct dma_debug_entry *b)
+{
+       if (a->dev != b->dev)
+               return false;
+
+       if ((b->dev_addr <= a->dev_addr) &&
+           ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+               return true;
+
+       return false;
+}
+
+/*
+ * Search a given entry in the hash bucket list
+ */
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+                                                 struct dma_debug_entry *ref,
+                                                 match_fn match)
+{
+       struct dma_debug_entry *entry, *ret = NULL;
+       int matches = 0, match_lvl, last_lvl = -1;
+
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!match(ref, entry))
+                       continue;
+
+               /*
+                * Some drivers map the same physical address multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we implement a
+                * best-fit algorithm here which returns the entry from
+                * the hash which fits best to the reference value
+                * instead of the first-fit.
+                */
+               matches += 1;
+               match_lvl = 0;
+               entry->size         == ref->size         ? ++match_lvl : 0;
+               entry->type         == ref->type         ? ++match_lvl : 0;
+               entry->direction    == ref->direction    ? ++match_lvl : 0;
+               entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+               if (match_lvl == 4) {
+                       /* perfect-fit - return the result */
+                       return entry;
+               } else if (match_lvl > last_lvl) {
+                       /*
+                        * We found an entry that fits better than the
+                        * previous one or it is the 1st match.
+                        */
+                       last_lvl = match_lvl;
+                       ret      = entry;
+               }
+       }
+
+       /*
+        * If we have multiple matches but no perfect-fit, just return
+        * NULL.
+        */
+       ret = (matches == 1) ? ret : NULL;
+
+       return ret;
+}
+
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+                                                struct dma_debug_entry *ref)
+{
+       return __hash_bucket_find(bucket, ref, exact_match);
+}
+
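+/*
+ * Find an entry that contains the reference range.  The hash is indexed
+ * by dma address, so a mapping that covers @ref may have been inserted
+ * under an earlier bucket; walk backwards one bucket (1 << HASH_FN_SHIFT
+ * bytes of address space) at a time, up to the device's maximum segment
+ * size, until a containing entry is found.
+ */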
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+                                                  struct dma_debug_entry *ref,
+                                                  unsigned long *flags)
+{
+
+       unsigned int max_range = dma_get_max_seg_size(ref->dev);
+       struct dma_debug_entry *entry, index = *ref;
+       unsigned int range = 0;
+
+       while (range <= max_range) {
+               entry = __hash_bucket_find(*bucket, ref, containing_match);
+
+               if (entry)
+                       return entry;
+
+               /*
+                * Nothing found, go back a hash bucket
+                */
+               put_hash_bucket(*bucket, flags);
+               range          += (1 << HASH_FN_SHIFT);
+               index.dev_addr -= (1 << HASH_FN_SHIFT);
+               *bucket = get_hash_bucket(&index, flags);
+       }
+
+       return NULL;
+}
+
+/*
+ * Add an entry to a hash bucket
+ */
+static void hash_bucket_add(struct hash_bucket *bucket,
+                           struct dma_debug_entry *entry)
+{
+       list_add_tail(&entry->list, &bucket->list);
+}
+
+/*
+ * Remove entry from a hash bucket list
+ */
+static void hash_bucket_del(struct dma_debug_entry *entry)
+{
+       list_del(&entry->list);
+}
+
+static unsigned long long phys_addr(struct dma_debug_entry *entry)
+{
+       if (entry->type == dma_debug_resource)
+               return __pfn_to_phys(entry->pfn) + entry->offset;
+
+       return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+}
+
+/*
+ * Dump mapping entries for debugging purposes
+ */
+void debug_dma_dump_mappings(struct device *dev)
+{
+       int idx;
+
+       for (idx = 0; idx < HASH_SIZE; idx++) {
+               struct hash_bucket *bucket = &dma_entry_hash[idx];
+               struct dma_debug_entry *entry;
+               unsigned long flags;
+
+               spin_lock_irqsave(&bucket->lock, flags);
+
+               list_for_each_entry(entry, &bucket->list, list) {
+                       if (!dev || dev == entry->dev) {
+                               dev_info(entry->dev,
+                                        "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
+                                        type2name[entry->type], idx,
+                                        phys_addr(entry), entry->pfn,
+                                        entry->dev_addr, entry->size,
+                                        dir2name[entry->direction],
+                                        maperr2str[entry->map_err_type]);
+                       }
+               }
+
+               spin_unlock_irqrestore(&bucket->lock, flags);
+       }
+}
+
+/*
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
+ * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
+ * the entry already exists at insertion time add a tag as a reference
+ * count for the overlapping mappings.  For now, the overlap tracking
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
+ *
+ * Memory usage is mostly constrained by the maximum number of available
+ * dma-debug entries in that we need a free dma_debug_entry before
+ * inserting into the tree.  In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
+ *
+ * At any time debug_dma_assert_idle() can be called to trigger a
+ * warning if any cachelines in the given page are in the active set.
+ */
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static DEFINE_SPINLOCK(radix_lock);
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
+
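+/* Convert a mapping's (pfn, offset) pair into a global cacheline number. */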
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+               (entry->offset >> L1_CACHE_SHIFT);
+}
+
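+/*
+ * The overlap counter of an active cacheline is kept in the radix tree
+ * tags of its slot: a set tag i contributes 2^i to the number of
+ * additional mappings sharing that cacheline.
+ */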
+static int active_cacheline_read_overlap(phys_addr_t cln)
+{
+       int overlap = 0, i;
+
+       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
+                       overlap |= 1 << i;
+       return overlap;
+}
+
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
+{
+       int i;
+
+       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
+               return overlap;
+
+       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+               if (overlap & 1 << i)
+                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
+               else
+                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
+
+       return overlap;
+}
+
+static void active_cacheline_inc_overlap(phys_addr_t cln)
+{
+       int overlap = active_cacheline_read_overlap(cln);
+
+       overlap = active_cacheline_set_overlap(cln, ++overlap);
+
+       /* If we overflowed the overlap counter then we're potentially
+        * leaking dma-mappings.  Otherwise, if maps and unmaps are
+        * balanced then this overflow may cause false negatives in
+        * debug_dma_assert_idle() as the cacheline may be marked idle
+        * prematurely.
+        */
+       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
+}
+
+static int active_cacheline_dec_overlap(phys_addr_t cln)
+{
+       int overlap = active_cacheline_read_overlap(cln);
+
+       return active_cacheline_set_overlap(cln, --overlap);
+}
+
+static int active_cacheline_insert(struct dma_debug_entry *entry)
+{
+       phys_addr_t cln = to_cacheline_number(entry);
+       unsigned long flags;
+       int rc;
+
+       /* If the device is not writing memory then we don't have any
+        * concerns about the cpu consuming stale data.  This mitigates
+        * legitimate usages of overlapping mappings.
+        */
+       if (entry->direction == DMA_TO_DEVICE)
+               return 0;
+
+       spin_lock_irqsave(&radix_lock, flags);
+       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
+       if (rc == -EEXIST)
+               active_cacheline_inc_overlap(cln);
+       spin_unlock_irqrestore(&radix_lock, flags);
+
+       return rc;
+}
+
+static void active_cacheline_remove(struct dma_debug_entry *entry)
+{
+       phys_addr_t cln = to_cacheline_number(entry);
+       unsigned long flags;
+
+       /* ...mirror the insert case */
+       if (entry->direction == DMA_TO_DEVICE)
+               return;
+
+       spin_lock_irqsave(&radix_lock, flags);
+       /* since we are counting overlaps the final put of the
+        * cacheline will occur when the overlap count is 0.
+        * active_cacheline_dec_overlap() returns -1 in that case
+        */
+       if (active_cacheline_dec_overlap(cln) < 0)
+               radix_tree_delete(&dma_active_cacheline, cln);
+       spin_unlock_irqrestore(&radix_lock, flags);
+}
+
+/**
+ * debug_dma_assert_idle() - assert that a page is not undergoing dma
+ * @page: page to lookup in the dma_active_cacheline tree
+ *
+ * Place a call to this routine in cases where the cpu touching the page
+ * before the dma completes (page is dma_unmapped) will lead to data
+ * corruption.
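+ *
+ * For instance (illustrative), a copy-on-write fault handler could call
+ *     debug_dma_assert_idle(page);
+ * before duplicating @page for the CPU.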
+ */
+void debug_dma_assert_idle(struct page *page)
+{
+       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+       struct dma_debug_entry *entry = NULL;
+       void **results = (void **) &ents;
+       unsigned int nents, i;
+       unsigned long flags;
+       phys_addr_t cln;
+
+       if (dma_debug_disabled())
+               return;
+
+       if (!page)
+               return;
+
+       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
+       spin_lock_irqsave(&radix_lock, flags);
+       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+                                      CACHELINES_PER_PAGE);
+       for (i = 0; i < nents; i++) {
+               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+               if (ent_cln == cln) {
+                       entry = ents[i];
+                       break;
+               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+                       break;
+       }
+       spin_unlock_irqrestore(&radix_lock, flags);
+
+       if (!entry)
+               return;
+
+       cln = to_cacheline_number(entry);
+       err_printk(entry->dev, entry,
+                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+                  &cln);
+}
+
+/*
+ * Wrapper function for adding an entry to the hash.
+ * This function takes care of locking itself.
+ */
+static void add_dma_entry(struct dma_debug_entry *entry)
+{
+       struct hash_bucket *bucket;
+       unsigned long flags;
+       int rc;
+
+       bucket = get_hash_bucket(entry, &flags);
+       hash_bucket_add(bucket, entry);
+       put_hash_bucket(bucket, &flags);
+
+       rc = active_cacheline_insert(entry);
+       if (rc == -ENOMEM) {
+               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+               global_disable = true;
+       }
+
+       /* TODO: report -EEXIST errors here as overlapping mappings are
+        * not supported by the DMA API
+        */
+}
+
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+       struct dma_debug_entry *entry;
+
+       entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+       list_del(&entry->list);
+       memset(entry, 0, sizeof(*entry));
+
+       num_free_entries -= 1;
+       if (num_free_entries < min_free_entries)
+               min_free_entries = num_free_entries;
+
+       return entry;
+}
+
+/* struct dma_entry allocator
+ *
+ * The next two functions implement the allocator for
+ * struct dma_debug_entries.
+ */
+static struct dma_debug_entry *dma_entry_alloc(void)
+{
+       struct dma_debug_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(&free_entries_lock, flags);
+
+       if (list_empty(&free_entries)) {
+               global_disable = true;
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+               pr_err("DMA-API: debugging out of memory - disabling\n");
+               return NULL;
+       }
+
+       entry = __dma_entry_alloc();
+
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
+#ifdef CONFIG_STACKTRACE
+       entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
+       entry->stacktrace.entries = entry->st_entries;
+       entry->stacktrace.skip = 2;
+       save_stack_trace(&entry->stacktrace);
+#endif
+
+       return entry;
+}
+
+static void dma_entry_free(struct dma_debug_entry *entry)
+{
+       unsigned long flags;
+
+       active_cacheline_remove(entry);
+
+       /*
+        * add to beginning of the list - this way the entries are
+        * more likely cache hot when they are reallocated.
+        */
+       spin_lock_irqsave(&free_entries_lock, flags);
+       list_add(&entry->list, &free_entries);
+       num_free_entries += 1;
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+}
+
+int dma_debug_resize_entries(u32 num_entries)
+{
+       int i, delta, ret = 0;
+       unsigned long flags;
+       struct dma_debug_entry *entry;
+       LIST_HEAD(tmp);
+
+       spin_lock_irqsave(&free_entries_lock, flags);
+
+       if (nr_total_entries < num_entries) {
+               delta = num_entries - nr_total_entries;
+
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+
+               for (i = 0; i < delta; i++) {
+                       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+                       if (!entry)
+                               break;
+
+                       list_add_tail(&entry->list, &tmp);
+               }
+
+               spin_lock_irqsave(&free_entries_lock, flags);
+
+               list_splice(&tmp, &free_entries);
+               nr_total_entries += i;
+               num_free_entries += i;
+       } else {
+               delta = nr_total_entries - num_entries;
+
+               for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+                       entry = __dma_entry_alloc();
+                       kfree(entry);
+               }
+
+               nr_total_entries -= i;
+       }
+
+       if (nr_total_entries != num_entries)
+               ret = 1;
+
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
+       return ret;
+}
+
+/*
+ * DMA-API debugging init code
+ *
+ * The init code does two things:
+ *   1. Initialize core data structures
+ *   2. Preallocate a given number of dma_debug_entry structs
+ */
+
+static int prealloc_memory(u32 num_entries)
+{
+       struct dma_debug_entry *entry, *next_entry;
+       int i;
+
+       for (i = 0; i < num_entries; ++i) {
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry)
+                       goto out_err;
+
+               list_add_tail(&entry->list, &free_entries);
+       }
+
+       num_free_entries = num_entries;
+       min_free_entries = num_entries;
+
+       pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
+
+       return 0;
+
+out_err:
+
+       list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
+               list_del(&entry->list);
+               kfree(entry);
+       }
+
+       return -ENOMEM;
+}
+
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       char buf[NAME_MAX_LEN + 1];
+       unsigned long flags;
+       int len;
+
+       if (!current_driver_name[0])
+               return 0;
+
+       /*
+        * We can't copy to userspace directly because current_driver_name can
+        * only be read under the driver_name_lock with irqs disabled. So
+        * create a temporary copy first.
+        */
+       read_lock_irqsave(&driver_name_lock, flags);
+       len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+       read_unlock_irqrestore(&driver_name_lock, flags);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+                           size_t count, loff_t *ppos)
+{
+       char buf[NAME_MAX_LEN];
+       unsigned long flags;
+       size_t len;
+       int i;
+
+       /*
+        * We can't copy from userspace directly. Access to
+        * current_driver_name is protected with a write_lock with irqs
+        * disabled. Since copy_from_user can fault and may sleep we
+        * need to copy to a temporary buffer first
+        */
+       len = min(count, (size_t)(NAME_MAX_LEN - 1));
+       if (copy_from_user(buf, userbuf, len))
+               return -EFAULT;
+
+       buf[len] = 0;
+
+       write_lock_irqsave(&driver_name_lock, flags);
+
+       /*
+        * Now handle the string we got from userspace very carefully.
+        * The rules are:
+        *         - only use the first token we got
+        *         - token delimiter is everything looking like a space
+        *           character (' ', '\n', '\t' ...)
+        */
+       if (!isalnum(buf[0])) {
+               /*
+                * If the first character userspace gave us is not
+                * alphanumerical then assume the filter should be
+                * switched off.
+                */
+               if (current_driver_name[0])
+                       pr_info("DMA-API: switching off dma-debug driver filter\n");
+               current_driver_name[0] = 0;
+               current_driver = NULL;
+               goto out_unlock;
+       }
+
+       /*
+        * Now parse out the first token and use it as the name for the
+        * driver to filter for.
+        */
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
+               current_driver_name[i] = buf[i];
+               if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+                       break;
+       }
+       current_driver_name[i] = 0;
+       current_driver = NULL;
+
+       pr_info("DMA-API: enable driver filter for driver [%s]\n",
+               current_driver_name);
+
+out_unlock:
+       write_unlock_irqrestore(&driver_name_lock, flags);
+
+       return count;
+}
+
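+/*
+ * Example (illustrative) usage from userspace, assuming debugfs is
+ * mounted at /sys/kernel/debug:
+ *     echo e1000e > /sys/kernel/debug/dma-api/driver_filter
+ */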
+static const struct file_operations filter_fops = {
+       .read  = filter_read,
+       .write = filter_write,
+       .llseek = default_llseek,
+};
+
+static int dma_debug_fs_init(void)
+{
+       dma_debug_dent = debugfs_create_dir("dma-api", NULL);
+       if (!dma_debug_dent) {
+               pr_err("DMA-API: can not create debugfs directory\n");
+               return -ENOMEM;
+       }
+
+       global_disable_dent = debugfs_create_bool("disabled", 0444,
+                       dma_debug_dent,
+                       &global_disable);
+       if (!global_disable_dent)
+               goto out_err;
+
+       error_count_dent = debugfs_create_u32("error_count", 0444,
+                       dma_debug_dent, &error_count);
+       if (!error_count_dent)
+               goto out_err;
+
+       show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
+                       dma_debug_dent,
+                       &show_all_errors);
+       if (!show_all_errors_dent)
+               goto out_err;
+
+       show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
+                       dma_debug_dent,
+                       &show_num_errors);
+       if (!show_num_errors_dent)
+               goto out_err;
+
+       num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
+                       dma_debug_dent,
+                       &num_free_entries);
+       if (!num_free_entries_dent)
+               goto out_err;
+
+       min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
+                       dma_debug_dent,
+                       &min_free_entries);
+       if (!min_free_entries_dent)
+               goto out_err;
+
+       filter_dent = debugfs_create_file("driver_filter", 0644,
+                                         dma_debug_dent, NULL, &filter_fops);
+       if (!filter_dent)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       debugfs_remove_recursive(dma_debug_dent);
+
+       return -ENOMEM;
+}
+
+static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
+{
+       struct dma_debug_entry *entry;
+       unsigned long flags;
+       int count = 0, i;
+
+       for (i = 0; i < HASH_SIZE; ++i) {
+               spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+               list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+                       if (entry->dev == dev) {
+                               count += 1;
+                               *out_entry = entry;
+                       }
+               }
+               spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+       }
+
+       return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct dma_debug_entry *uninitialized_var(entry);
+       int count;
+
+       if (dma_debug_disabled())
+               return 0;
+
+       switch (action) {
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               count = device_dma_allocations(dev, &entry);
+               if (count == 0)
+                       break;
+               err_printk(dev, entry, "DMA-API: device driver has pending "
+                               "DMA allocations while being released from device "
+                               "[count=%d]\n"
+                               "One of leaked entries details: "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [mapped as %s]\n",
+                       count, entry->dev_addr, entry->size,
+                       dir2name[entry->direction], type2name[entry->type]);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+void dma_debug_add_bus(struct bus_type *bus)
+{
+       struct notifier_block *nb;
+
+       if (dma_debug_disabled())
+               return;
+
+       nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+       if (nb == NULL) {
+               pr_err("dma_debug_add_bus: out of memory\n");
+               return;
+       }
+
+       nb->notifier_call = dma_debug_device_change;
+
+       bus_register_notifier(bus, nb);
+}
+
+static int dma_debug_init(void)
+{
+       int i;
+
+       /* Do not use dma_debug_initialized here, since we really want to be
+        * called to set dma_debug_initialized
+        */
+       if (global_disable)
+               return 0;
+
+       for (i = 0; i < HASH_SIZE; ++i) {
+               INIT_LIST_HEAD(&dma_entry_hash[i].list);
+               spin_lock_init(&dma_entry_hash[i].lock);
+       }
+
+       if (dma_debug_fs_init() != 0) {
+               pr_err("DMA-API: error creating debugfs entries - disabling\n");
+               global_disable = true;
+
+               return 0;
+       }
+
+       if (prealloc_memory(nr_prealloc_entries) != 0) {
+               pr_err("DMA-API: debugging out of memory error - disabled\n");
+               global_disable = true;
+
+               return 0;
+       }
+
+       nr_total_entries = num_free_entries;
+
+       dma_debug_initialized = true;
+
+       pr_info("DMA-API: debugging enabled by kernel config\n");
+       return 0;
+}
+core_initcall(dma_debug_init);
+
+static __init int dma_debug_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (strncmp(str, "off", 3) == 0) {
+               pr_info("DMA-API: debugging disabled on kernel command line\n");
+               global_disable = true;
+       }
+
+       return 0;
+}
+
+static __init int dma_debug_entries_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+       if (!get_option(&str, &nr_prealloc_entries))
+               nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
+       return 0;
+}
+
+__setup("dma_debug=", dma_debug_cmdline);
+__setup("dma_debug_entries=", dma_debug_entries_cmdline);
+
+static void check_unmap(struct dma_debug_entry *ref)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       bucket = get_hash_bucket(ref, &flags);
+       entry = bucket_find_exact(bucket, ref);
+
+       if (!entry) {
+               /* must drop lock before calling dma_mapping_error */
+               put_hash_bucket(bucket, &flags);
+
+               if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free an "
+                                  "invalid DMA memory address\n");
+               } else {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free DMA "
+                                  "memory it has not allocated [device "
+                                  "address=0x%016llx] [size=%llu bytes]\n",
+                                  ref->dev_addr, ref->size);
+               }
+               return;
+       }
+
+       if (ref->size != entry->size) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different size "
+                          "[device address=0x%016llx] [map size=%llu bytes] "
+                          "[unmap size=%llu bytes]\n",
+                          ref->dev_addr, entry->size, ref->size);
+       }
+
+       if (ref->type != entry->type) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with wrong function "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped as %s] [unmapped as %s]\n",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type], type2name[ref->type]);
+       } else if ((entry->type == dma_debug_coherent) &&
+                  (phys_addr(ref) != phys_addr(entry))) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different CPU address "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[cpu alloc address=0x%016llx] "
+                          "[cpu free address=0x%016llx]",
+                          ref->dev_addr, ref->size,
+                          phys_addr(entry),
+                          phys_addr(ref));
+       }
+
+       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+           ref->sg_call_ents != entry->sg_call_ents) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA sg list with different entry count "
+                          "[map count=%d] [unmap count=%d]\n",
+                          entry->sg_call_ents, ref->sg_call_ents);
+       }
+
+       /*
+        * This may not be a bug in reality - but most implementations of the
+        * DMA API don't handle this properly, so check for it here
+        */
+       if (ref->direction != entry->direction) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different direction "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped with %s] [unmapped with %s]\n",
+                          ref->dev_addr, ref->size,
+                          dir2name[entry->direction],
+                          dir2name[ref->direction]);
+       }
+
+       /*
+        * Drivers should use dma_mapping_error() to check the returned
+        * addresses of dma_map_single() and dma_map_page().
+        * If not, print this warning message. See Documentation/DMA-API.txt.
+        */
+       if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+               err_printk(ref->dev, entry,
+                          "DMA-API: device driver failed to check map error "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped as %s]",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type]);
+       }
+
+       hash_bucket_del(entry);
+       dma_entry_free(entry);
+
+       put_hash_bucket(bucket, &flags);
+}
+
+static void check_for_stack(struct device *dev,
+                           struct page *page, size_t offset)
+{
+       void *addr;
+       struct vm_struct *stack_vm_area = task_stack_vm_area(current);
+
+       if (!stack_vm_area) {
+               /* Stack is direct-mapped. */
+               if (PageHighMem(page))
+                       return;
+               addr = page_address(page) + offset;
+               if (object_is_on_stack(addr))
+                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
+       } else {
+               /* Stack is vmalloced. */
+               int i;
+
+               for (i = 0; i < stack_vm_area->nr_pages; i++) {
+                       if (page != stack_vm_area->pages[i])
+                               continue;
+
+                       addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
+                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
+                       break;
+               }
+       }
+}
+
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
+{
+       unsigned long a1 = (unsigned long)addr;
+       unsigned long b1 = a1 + len;
+       unsigned long a2 = (unsigned long)start;
+       unsigned long b2 = (unsigned long)end;
+
+       return !(b1 <= a2 || a1 >= b2);
+}
+
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
+{
+       if (overlap(addr, len, _stext, _etext) ||
+           overlap(addr, len, __start_rodata, __end_rodata))
+               err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+}
+
+static void check_sync(struct device *dev,
+                      struct dma_debug_entry *ref,
+                      bool to_cpu)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       bucket = get_hash_bucket(ref, &flags);
+
+       entry = bucket_find_contain(&bucket, ref, &flags);
+
+       if (!entry) {
+               err_printk(dev, NULL, "DMA-API: device driver tries "
+                               "to sync DMA memory it has not allocated "
+                               "[device address=0x%016llx] [size=%llu bytes]\n",
+                               (unsigned long long)ref->dev_addr, ref->size);
+               goto out;
+       }
+
+       if (ref->size > entry->size) {
+               err_printk(dev, entry, "DMA-API: device driver syncs"
+                               " DMA memory outside allocated range "
+                               "[device address=0x%016llx] "
+                               "[allocation size=%llu bytes] "
+                               "[sync offset+size=%llu]\n",
+                               entry->dev_addr, entry->size,
+                               ref->size);
+       }
+
+       if (entry->direction == DMA_BIDIRECTIONAL)
+               goto out;
+
+       if (ref->direction != entry->direction) {
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "DMA memory with different direction "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+       }
+
+       if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
+                     !(ref->direction == DMA_TO_DEVICE))
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "device read-only DMA memory for cpu "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+
+       if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
+                      !(ref->direction == DMA_FROM_DEVICE))
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "device write-only DMA memory to device "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+
+       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+           ref->sg_call_ents != entry->sg_call_ents) {
+               err_printk(ref->dev, entry, "DMA-API: device driver syncs "
+                          "DMA sg list with different entry count "
+                          "[map count=%d] [sync count=%d]\n",
+                          entry->sg_call_ents, ref->sg_call_ents);
+       }
+
+out:
+       put_hash_bucket(bucket, &flags);
+}
+
+static void check_sg_segment(struct device *dev, struct scatterlist *sg)
+{
+#ifdef CONFIG_DMA_API_DEBUG_SG
+       unsigned int max_seg = dma_get_max_seg_size(dev);
+       u64 start, end, boundary = dma_get_seg_boundary(dev);
+
+       /*
+        * Either the driver forgot to set dma_parms appropriately, or
+        * whoever generated the list forgot to check them.
+        */
+       if (sg->length > max_seg)
+               err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
+                          sg->length, max_seg);
+       /*
+        * In some cases this could potentially be the DMA API
+        * implementation's fault, but it would usually imply that
+        * the scatterlist was built inappropriately to begin with.
+        */
+       start = sg_dma_address(sg);
+       end = start + sg_dma_len(sg) - 1;
+       if ((start ^ end) & ~boundary)
+               err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
+                          start, end, boundary);
+#endif
+}
+
+void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
+                       size_t size, int direction, dma_addr_t dma_addr,
+                       bool map_single)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (dma_mapping_error(dev, dma_addr))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->dev       = dev;
+       entry->type      = dma_debug_page;
+       entry->pfn       = page_to_pfn(page);
+       entry->offset    = offset;
+       entry->dev_addr  = dma_addr;
+       entry->size      = size;
+       entry->direction = direction;
+       entry->map_err_type = MAP_ERR_NOT_CHECKED;
+
+       if (map_single)
+               entry->type = dma_debug_single;
+
+       check_for_stack(dev, page, offset);
+
+       if (!PageHighMem(page)) {
+               void *addr = page_address(page) + offset;
+
+               check_for_illegal_area(dev, addr, size);
+       }
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_page);
+
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       struct dma_debug_entry ref;
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.dev = dev;
+       ref.dev_addr = dma_addr;
+       bucket = get_hash_bucket(&ref, &flags);
+
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!exact_match(&ref, entry))
+                       continue;
+
+               /*
+                * The same physical address can be mapped multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we simply update the
+                * first entry from the hash which fits the reference value
+                * and is not currently listed as being checked.
+                */
+               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+                       entry->map_err_type = MAP_ERR_CHECKED;
+                       break;
+               }
+       }
+
+       put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
+void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+                         size_t size, int direction, bool map_single)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_page,
+               .dev            = dev,
+               .dev_addr       = addr,
+               .size           = size,
+               .direction      = direction,
+       };
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (map_single)
+               ref.type = dma_debug_single;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_page);
+
+void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                     int nents, int mapped_ents, int direction)
+{
+       struct dma_debug_entry *entry;
+       struct scatterlist *s;
+       int i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, mapped_ents, i) {
+               entry = dma_entry_alloc();
+               if (!entry)
+                       return;
+
+               entry->type           = dma_debug_sg;
+               entry->dev            = dev;
+               entry->pfn            = page_to_pfn(sg_page(s));
+               entry->offset         = s->offset;
+               entry->size           = sg_dma_len(s);
+               entry->dev_addr       = sg_dma_address(s);
+               entry->direction      = direction;
+               entry->sg_call_ents   = nents;
+               entry->sg_mapped_ents = mapped_ents;
+
+               check_for_stack(dev, sg_page(s), s->offset);
+
+               if (!PageHighMem(sg_page(s)))
+                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
+
+               check_sg_segment(dev, s);
+
+               add_dma_entry(entry);
+       }
+}
+EXPORT_SYMBOL(debug_dma_map_sg);
+
+static int get_nr_mapped_entries(struct device *dev,
+                                struct dma_debug_entry *ref)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+       int mapped_ents;
+
+       bucket       = get_hash_bucket(ref, &flags);
+       entry        = bucket_find_exact(bucket, ref);
+       mapped_ents  = 0;
+
+       if (entry)
+               mapped_ents = entry->sg_mapped_ents;
+       put_hash_bucket(bucket, &flags);
+
+       return mapped_ents;
+}
+
+void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                       int nelems, int dir)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sglist, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = dir,
+                       .sg_call_ents   = nelems,
+               };
+
+               if (mapped_ents && i >= mapped_ents)
+                       break;
+
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               check_unmap(&ref);
+       }
+}
+EXPORT_SYMBOL(debug_dma_unmap_sg);
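The sg_call_ents/sg_mapped_ents pair exists because dma_map_sg() may legitimately return fewer entries than it was given (an IOMMU can merge segments). The contract dma-debug enforces, sketched below with illustrative names (my_hw_add_desc() is hypothetical), is that the device uses the mapped count while the unmap uses the original nents:

	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;
	for_each_sg(sgl, s, mapped, i)	/* iterate the mapped count */
		my_hw_add_desc(sg_dma_address(s), sg_dma_len(s));
	/* ... device runs ... */
	dma_unmap_sg(my_dev, sgl, nents, DMA_TO_DEVICE);	/* original nents */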
+
+void debug_dma_alloc_coherent(struct device *dev, size_t size,
+                             dma_addr_t dma_addr, void *virt)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (unlikely(virt == NULL))
+               return;
+
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->type      = dma_debug_coherent;
+       entry->dev       = dev;
+       entry->offset    = offset_in_page(virt);
+       entry->size      = size;
+       entry->dev_addr  = dma_addr;
+       entry->direction = DMA_BIDIRECTIONAL;
+
+       if (is_vmalloc_addr(virt))
+               entry->pfn = vmalloc_to_pfn(virt);
+       else
+               entry->pfn = page_to_pfn(virt_to_page(virt));
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_alloc_coherent);
+
+void debug_dma_free_coherent(struct device *dev, size_t size,
+                        void *virt, dma_addr_t addr)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_coherent,
+               .dev            = dev,
+               .offset         = offset_in_page(virt),
+               .dev_addr       = addr,
+               .size           = size,
+               .direction      = DMA_BIDIRECTIONAL,
+       };
+
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+               return;
+
+       if (is_vmalloc_addr(virt))
+               ref.pfn = vmalloc_to_pfn(virt);
+       else
+               ref.pfn = page_to_pfn(virt_to_page(virt));
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_free_coherent);
+
+void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+                           int direction, dma_addr_t dma_addr)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->type             = dma_debug_resource;
+       entry->dev              = dev;
+       entry->pfn              = PHYS_PFN(addr);
+       entry->offset           = offset_in_page(addr);
+       entry->size             = size;
+       entry->dev_addr         = dma_addr;
+       entry->direction        = direction;
+       entry->map_err_type     = MAP_ERR_NOT_CHECKED;
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_resource);
+
+void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+                             size_t size, int direction)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_resource,
+               .dev            = dev,
+               .dev_addr       = dma_addr,
+               .size           = size,
+               .direction      = direction,
+       };
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_resource);
+
+void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                                  size_t size, int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
+
+void debug_dma_sync_single_for_device(struct device *dev,
+                                     dma_addr_t dma_handle, size_t size,
+                                     int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_for_device);
+
+void debug_dma_sync_single_range_for_cpu(struct device *dev,
+                                        dma_addr_t dma_handle,
+                                        unsigned long offset, size_t size,
+                                        int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
+
+void debug_dma_sync_single_range_for_device(struct device *dev,
+                                           dma_addr_t dma_handle,
+                                           unsigned long offset,
+                                           size_t size, int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
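A note on the ref.size = offset + size line in both range helpers: the caller passes the mapping's base handle plus an offset, as in dma_sync_single_range_for_cpu(dev, handle, offset, size, dir), so the extent that must lie within the stored entry is offset + size measured from the base. For example, syncing 512 bytes at offset 4096 of an 8 KiB mapping yields ref.size = 4608, comfortably inside the entry's 8192, while the same sync at offset 8000 would yield 8512 and be flagged by check_sync() as touching memory outside the mapping.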
+
+void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                              int nelems, int direction)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
+
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               if (i >= mapped_ents)
+                       break;
+
+               check_sync(dev, &ref, true);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
+
+void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                                 int nelems, int direction)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               if (i >= mapped_ents)
+                       break;
+
+               check_sync(dev, &ref, false);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
+
+static int __init dma_debug_driver_setup(char *str)
+{
+       int i;
+
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+               current_driver_name[i] = *str;
+               if (*str == 0)
+                       break;
+       }
+
+       if (current_driver_name[0])
+               pr_info("DMA-API: enable driver filter for driver [%s]\n",
+                       current_driver_name);
+
+       return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
new file mode 100644 (file)
index 0000000..8be8106
--- /dev/null
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map physical memory directly without using an IOMMU or
+ * flushing caches.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
+#include <linux/pfn.h>
+#include <linux/set_memory.h>
+
+#define DIRECT_MAPPING_ERROR           0
+
+/*
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
+ * some use it for entirely different regions:
+ */
+#ifndef ARCH_ZONE_DMA_BITS
+#define ARCH_ZONE_DMA_BITS 24
+#endif
+
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+       return sev_active();
+}
+
+static bool
+check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
+               const char *caller)
+{
+       if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+               if (!dev->dma_mask) {
+                       dev_err(dev,
+                               "%s: call on device without dma_mask\n",
+                               caller);
+                       return false;
+               }
+
+               if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+                       dev_err(dev,
+                               "%s: overflow %pad+%zu of device mask %llx\n",
+                               caller, &dma_addr, size, *dev->dma_mask);
+               }
+               return false;
+       }
+       return true;
+}
+
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+       dma_addr_t addr = force_dma_unencrypted() ?
+               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+       return addr + size - 1 <= dev->coherent_dma_mask;
+}
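The check above in concrete numbers, ignoring any phys-to-dma offset: for a device with a 24-bit coherent mask (0xffffff), a 64 KiB buffer at physical address 0xff0000 passes, since 0xff0000 + 0x10000 - 1 = 0xffffff, while anything at or above 0x1000000 fails and triggers the GFP_DMA retry logic in dma_direct_alloc() below.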
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       int page_order = get_order(size);
+       struct page *page = NULL;
+       void *ret;
+
+       /* we always manually zero the memory once we are done: */
+       gfp &= ~__GFP_ZERO;
+
+       /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+       if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+               gfp |= GFP_DMA;
+       if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+               gfp |= GFP_DMA32;
+
+again:
+       /* CMA can be used only in the context which permits sleeping */
+       if (gfpflags_allow_blocking(gfp)) {
+               page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+               if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+       if (!page)
+               page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+               __free_pages(page, page_order);
+               page = NULL;
+
+               if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+                   dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+                   !(gfp & (GFP_DMA32 | GFP_DMA))) {
+                       gfp |= GFP_DMA32;
+                       goto again;
+               }
+
+               if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+                   dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+                   !(gfp & GFP_DMA)) {
+                       gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+                       goto again;
+               }
+       }
+
+       if (!page)
+               return NULL;
+       ret = page_address(page);
+       if (force_dma_unencrypted()) {
+               set_memory_decrypted((unsigned long)ret, 1 << page_order);
+               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+       } else {
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
+       }
+       memset(ret, 0, size);
+       return ret;
+}
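What a consumer sees from this allocator, as a minimal sketch ("my_dev" is illustrative; dma_direct_alloc() is reached via dma_alloc_coherent() on a bus that uses dma_direct_ops):

	dma_addr_t bus_addr;
	void *cpu;

	cpu = dma_alloc_coherent(my_dev, SZ_64K, &bus_addr, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	/*
	 * "cpu" is zeroed (the memset after the GFP retries above) and
	 * bus_addr fits my_dev->coherent_dma_mask by construction.
	 */
	dma_free_coherent(my_dev, SZ_64K, cpu, bus_addr);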
+
+/*
+ * NOTE: this function must never look at the dma_addr argument, because we want
+ * to be able to use it as a helper for iommu implementations as well.
+ */
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned int page_order = get_order(size);
+
+       if (force_dma_unencrypted())
+               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+       if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+               free_pages((unsigned long)cpu_addr, page_order);
+}
+
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+       dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+
+       if (!check_addr(dev, dma_addr, size, __func__))
+               return DIRECT_MAPPING_ERROR;
+       return dma_addr;
+}
+
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+               enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
+               if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
+                       return 0;
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nents;
+}
+
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+               return 0;
+#else
+       /*
+        * Because 32-bit DMA masks are so common we expect every architecture
+        * to be able to satisfy them - either by not supporting more physical
+        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
+        * architecture needs to use an IOMMU instead of the direct mapping.
+        */
+       if (mask < DMA_BIT_MASK(32))
+               return 0;
+#endif
+       /*
+        * Various PCI/PCIe bridges have broken support for > 32bit DMA even
+        * if the device itself might support it.
+        */
+       if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
+               return 0;
+       return 1;
+}
+
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr == DIRECT_MAPPING_ERROR;
+}
+
+const struct dma_map_ops dma_direct_ops = {
+       .alloc                  = dma_direct_alloc,
+       .free                   = dma_direct_free,
+       .map_page               = dma_direct_map_page,
+       .map_sg                 = dma_direct_map_sg,
+       .dma_supported          = dma_direct_supported,
+       .mapping_error          = dma_direct_mapping_error,
+};
+EXPORT_SYMBOL(dma_direct_ops);
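A sketch of how an architecture without address-translation hardware would select these ops; this assumes the asm-generic get_arch_dma_ops() hook of this kernel generation and is not part of the patch:

	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	{
		return &dma_direct_ops;	/* plain phys-to-dma translation */
	}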
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
new file mode 100644 (file)
index 0000000..d2a92dd
--- /dev/null
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch-independent dma-mapping routines
+ *
+ * Copyright (c) 2006  SUSE Linux Products GmbH
+ * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Managed DMA API
+ */
+struct dma_devres {
+       size_t          size;
+       void            *vaddr;
+       dma_addr_t      dma_handle;
+       unsigned long   attrs;
+};
+
+static void dmam_release(struct device *dev, void *res)
+{
+       struct dma_devres *this = res;
+
+       dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
+                       this->attrs);
+}
+
+static int dmam_match(struct device *dev, void *res, void *match_data)
+{
+       struct dma_devres *this = res, *match = match_data;
+
+       if (this->vaddr == match->vaddr) {
+               WARN_ON(this->size != match->size ||
+                       this->dma_handle != match->dma_handle);
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dmam_alloc_coherent - Managed dma_alloc_coherent()
+ * @dev: Device to allocate coherent memory for
+ * @size: Size of allocation
+ * @dma_handle: Out argument for allocated DMA handle
+ * @gfp: Allocation flags
+ *
+ * Managed dma_alloc_coherent().  Memory allocated using this function
+ * will be automatically released on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *dmam_alloc_coherent(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t gfp)
+{
+       struct dma_devres *dr;
+       void *vaddr;
+
+       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
+       if (!dr)
+               return NULL;
+
+       vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
+       if (!vaddr) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       dr->vaddr = vaddr;
+       dr->dma_handle = *dma_handle;
+       dr->size = size;
+
+       devres_add(dev, dr);
+
+       return vaddr;
+}
+EXPORT_SYMBOL(dmam_alloc_coherent);
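The devres pattern this enables, as a hedged sketch (the probe function and RING_BYTES are illustrative): no error-path or remove-time dma_free_coherent() is needed, because dmam_release() runs automatically when the driver detaches:

	static int my_probe(struct platform_device *pdev)
	{
		dma_addr_t ring_dma;
		void *ring;

		ring = dmam_alloc_coherent(&pdev->dev, RING_BYTES,
					   &ring_dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;
		/* use ring/ring_dma; devres frees them on detach */
		return 0;
	}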
+
+/**
+ * dmam_free_coherent - Managed dma_free_coherent()
+ * @dev: Device to free coherent memory for
+ * @size: Size of allocation
+ * @vaddr: Virtual address of the memory to free
+ * @dma_handle: DMA handle of the memory to free
+ *
+ * Managed dma_free_coherent().
+ */
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+                       dma_addr_t dma_handle)
+{
+       struct dma_devres match_data = { size, vaddr, dma_handle };
+
+       dma_free_coherent(dev, size, vaddr, dma_handle);
+       WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
+}
+EXPORT_SYMBOL(dmam_free_coherent);
+
+/**
+ * dmam_alloc_attrs - Managed dma_alloc_attrs()
+ * @dev: Device to allocate memory for
+ * @size: Size of allocation
+ * @dma_handle: Out argument for allocated DMA handle
+ * @gfp: Allocation flags
+ * @attrs: Flags in the DMA_ATTR_* namespace.
+ *
+ * Managed dma_alloc_attrs().  Memory allocated using this function will be
+ * automatically released on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       struct dma_devres *dr;
+       void *vaddr;
+
+       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
+       if (!dr)
+               return NULL;
+
+       vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
+       if (!vaddr) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       dr->vaddr = vaddr;
+       dr->dma_handle = *dma_handle;
+       dr->size = size;
+       dr->attrs = attrs;
+
+       devres_add(dev, dr);
+
+       return vaddr;
+}
+EXPORT_SYMBOL(dmam_alloc_attrs);
+
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+
+static void dmam_coherent_decl_release(struct device *dev, void *res)
+{
+       dma_release_declared_memory(dev);
+}
+
+/**
+ * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
+ * @dev: Device to declare coherent memory for
+ * @phys_addr: Physical address of coherent memory to be declared
+ * @device_addr: Device address of coherent memory to be declared
+ * @size: Size of coherent memory to be declared
+ * @flags: Flags
+ *
+ * Managed dma_declare_coherent_memory().
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                                dma_addr_t device_addr, size_t size, int flags)
+{
+       void *res;
+       int rc;
+
+       res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
+                                        flags);
+       if (!rc)
+               devres_add(dev, res);
+       else
+               devres_free(res);
+
+       return rc;
+}
+EXPORT_SYMBOL(dmam_declare_coherent_memory);
+
+/**
+ * dmam_release_declared_memory - Managed dma_release_declared_memory().
+ * @dev: Device to release declared coherent memory for
+ *
+ * Managed dma_release_declared_memory().
+ */
+void dmam_release_declared_memory(struct device *dev)
+{
+       WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
+}
+EXPORT_SYMBOL(dmam_release_declared_memory);
+
+#endif
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                void *cpu_addr, dma_addr_t handle, size_t size)
+{
+       struct page *page = virt_to_page(cpu_addr);
+       int ret;
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (unlikely(ret))
+               return ret;
+
+       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return 0;
+}
+EXPORT_SYMBOL(dma_common_get_sgtable);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
+       unsigned long user_count = vma_pages(vma);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < count && user_count <= (count - off))
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
+                                     user_count << PAGE_SHIFT,
+                                     vma->vm_page_prot);
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+
+       return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
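The bounds check above in concrete numbers: for a 16 KiB buffer, count is 4 pages; a userspace mmap() of 8 KiB at page offset 2 is allowed (off = 2 < 4 and user_count = 2 <= 4 - 2), whereas any window running past the buffer's last page fails the check and leaves ret at -ENXIO.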
+
+#ifdef CONFIG_MMU
+static struct vm_struct *__dma_common_pages_remap(struct page **pages,
+                       size_t size, unsigned long vm_flags, pgprot_t prot,
+                       const void *caller)
+{
+       struct vm_struct *area;
+
+       area = get_vm_area_caller(size, vm_flags, caller);
+       if (!area)
+               return NULL;
+
+       if (map_vm_area(area, prot, pages)) {
+               vunmap(area->addr);
+               return NULL;
+       }
+
+       return area;
+}
+
+/*
+ * Remaps an array of PAGE_SIZE pages into another vm_area.
+ * Cannot be used in non-sleeping contexts.
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                       unsigned long vm_flags, pgprot_t prot,
+                       const void *caller)
+{
+       struct vm_struct *area;
+
+       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+       if (!area)
+               return NULL;
+
+       area->pages = pages;
+
+       return area->addr;
+}
+
+/*
+ * Remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts.
+ */
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                       unsigned long vm_flags,
+                       pgprot_t prot, const void *caller)
+{
+       int i;
+       struct page **pages;
+       struct vm_struct *area;
+
+       pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       for (i = 0; i < (size >> PAGE_SHIFT); i++)
+               pages[i] = nth_page(page, i);
+
+       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+       kfree(pages);
+
+       if (!area)
+               return NULL;
+       return area->addr;
+}
+
+/*
+ * Unmaps a range previously mapped by dma_common_*_remap.
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (!area || (area->flags & vm_flags) != vm_flags) {
+               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+               return;
+       }
+
+       unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
+       vunmap(cpu_addr);
+}
+#endif
+
+/*
+ * enables DMA API use for a device
+ */
+int dma_configure(struct device *dev)
+{
+       if (dev->bus->dma_configure)
+               return dev->bus->dma_configure(dev);
+       return 0;
+}
+
+void dma_deconfigure(struct device *dev)
+{
+       of_dma_deconfigure(dev);
+       acpi_dma_deconfigure(dev);
+}
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
new file mode 100644 (file)
index 0000000..79e9a75
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+       dma_addr_t addr;
+
+       addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+       if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
+                               size, dir);
+       return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+       if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+       return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+       .alloc                  = arch_dma_alloc,
+       .free                   = arch_dma_free,
+       .mmap                   = arch_dma_mmap,
+       .sync_single_for_device = dma_noncoherent_sync_single_for_device,
+       .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
+       .map_page               = dma_noncoherent_map_page,
+       .map_sg                 = dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+       .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
+       .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
+       .unmap_page             = dma_noncoherent_unmap_page,
+       .unmap_sg               = dma_noncoherent_unmap_sg,
+#endif
+       .dma_supported          = dma_direct_supported,
+       .mapping_error          = dma_direct_mapping_error,
+       .cache_sync             = arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
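The arch contract these ops assume, sketched with a hypothetical cache primitive (my_wback() is illustrative; the hook name and signature follow <linux/dma-noncoherent.h>):

	void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			size_t size, enum dma_data_direction dir)
	{
		/*
		 * Write dirty CPU cache lines covering [paddr, paddr + size)
		 * back to memory before the device accesses the buffer.
		 */
		my_wback(paddr, size);
	}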
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
new file mode 100644 (file)
index 0000000..04b68d9
--- /dev/null
@@ -0,0 +1,1087 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * This implementation is a fallback for platforms that do not support
+ * I/O TLBs (aka DMA address translation hardware).
+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 03/05/07 davidm     Switch from PCI-DMA to generic device DMA API.
+ * 00/12/13 davidm     Rename to swiotlb.c and add mark_clean() to avoid
+ *                     unnecessary i-cache flushing.
+ * 04/07/.. ak         Better overflow handling. Assorted fixes.
+ * 05/09/10 linville   Add support for syncing ranges, support syncing for
+ *                     DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb     Add highmem support
+ */
+
+#include <linux/cache.h>
+#include <linux/dma-direct.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/swiotlb.h>
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/scatterlist.h>
+#include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/swiotlb.h>
+
+#define OFFSET(val, align) ((unsigned long)    \
+                          ((val) & ((align) - 1)))
+
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
+enum swiotlb_force swiotlb_force;
+
+/*
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+static phys_addr_t io_tlb_start, io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
+ * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs;
+
+/*
+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
+ */
+static unsigned long io_tlb_overflow = 32*1024;
+
+static phys_addr_t io_tlb_overflow_buffer;
+
+/*
+ * This is a free list describing the number of free entries available from
+ * each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
+ */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+static phys_addr_t *io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */
+static DEFINE_SPINLOCK(io_tlb_lock);
+
+static int late_alloc;
+
+static int __init
+setup_io_tlb_npages(char *str)
+{
+       if (isdigit(*str)) {
+               io_tlb_nslabs = simple_strtoul(str, &str, 0);
+               /* avoid tail segment of size < IO_TLB_SEGSIZE */
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+       if (*str == ',')
+               ++str;
+       if (!strcmp(str, "force")) {
+               swiotlb_force = SWIOTLB_FORCE;
+       } else if (!strcmp(str, "noforce")) {
+               swiotlb_force = SWIOTLB_NO_FORCE;
+               io_tlb_nslabs = 1;
+       }
+
+       return 0;
+}
+early_param("swiotlb", setup_io_tlb_npages);
+/* make io_tlb_overflow tunable too? */
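Usage note for the parser above, using the standard command-line semantics: "swiotlb=32768" reserves 32768 slabs (64 MB at the 2 KiB IO_TLB_SHIFT granularity), "swiotlb=force" bounces every DMA through the table regardless of the device's mask, and "swiotlb=noforce" effectively disables bouncing by shrinking the table to a single slab.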
+
+unsigned long swiotlb_nr_tbl(void)
+{
+       return io_tlb_nslabs;
+}
+EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+unsigned int swiotlb_max_segment(void)
+{
+       return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+       if (swiotlb_force == SWIOTLB_FORCE)
+               max_segment = 1;
+       else
+               max_segment = rounddown(val, PAGE_SIZE);
+}
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+       unsigned long size;
+
+       size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
+static bool no_iotlb_memory;
+
+void swiotlb_print_info(void)
+{
+       unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+       unsigned char *vstart, *vend;
+
+       if (no_iotlb_memory) {
+               pr_warn("software IO TLB: No low mem\n");
+               return;
+       }
+
+       vstart = phys_to_virt(io_tlb_start);
+       vend = phys_to_virt(io_tlb_end);
+
+       printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+              (unsigned long long)io_tlb_start,
+              (unsigned long long)io_tlb_end,
+              bytes >> 20, vstart, vend - 1);
+}
+
+/*
+ * Early SWIOTLB allocation may be too early to allow an architecture to
+ * perform the desired operations.  This function allows the architecture to
+ * call SWIOTLB when the operations are possible.  It needs to be called
+ * before the SWIOTLB memory is used.
+ */
+void __init swiotlb_update_mem_attributes(void)
+{
+       void *vaddr;
+       unsigned long bytes;
+
+       if (no_iotlb_memory || late_alloc)
+               return;
+
+       vaddr = phys_to_virt(io_tlb_start);
+       bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
+       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+       memset(vaddr, 0, bytes);
+
+       vaddr = phys_to_virt(io_tlb_overflow_buffer);
+       bytes = PAGE_ALIGN(io_tlb_overflow);
+       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+       memset(vaddr, 0, bytes);
+}
+
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+{
+       void *v_overflow_buffer;
+       unsigned long i, bytes;
+
+       bytes = nslabs << IO_TLB_SHIFT;
+
+       io_tlb_nslabs = nslabs;
+       io_tlb_start = __pa(tlb);
+       io_tlb_end = io_tlb_start + bytes;
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = memblock_virt_alloc_low_nopanic(
+                                               PAGE_ALIGN(io_tlb_overflow),
+                                               PAGE_SIZE);
+       if (!v_overflow_buffer)
+               return -ENOMEM;
+
+       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = memblock_virt_alloc(
+                               PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+                               PAGE_SIZE);
+       io_tlb_orig_addr = memblock_virt_alloc(
+                               PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+                               PAGE_SIZE);
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
+
+       if (verbose)
+               swiotlb_print_info();
+
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+       return 0;
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void  __init
+swiotlb_init(int verbose)
+{
+       size_t default_size = IO_TLB_DEFAULT_SIZE;
+       unsigned char *vstart;
+       unsigned long bytes;
+
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+
+       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       /* Get IO TLB memory from the low pages */
+       vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+       if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+               return;
+
+       if (io_tlb_start)
+               memblock_free_early(io_tlb_start,
+                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+       pr_warn("Cannot allocate SWIOTLB buffer");
+       no_iotlb_memory = true;
+}
+
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the page allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size(size_t default_size)
+{
+       unsigned long bytes, req_nslabs = io_tlb_nslabs;
+       unsigned char *vstart = NULL;
+       unsigned int order;
+       int rc = 0;
+
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
+       io_tlb_nslabs = SLABS_PER_PAGE << order;
+       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+               vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                 order);
+               if (vstart)
+                       break;
+               order--;
+       }
+
+       if (!vstart) {
+               io_tlb_nslabs = req_nslabs;
+               return -ENOMEM;
+       }
+       if (order != get_order(bytes)) {
+               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+               io_tlb_nslabs = SLABS_PER_PAGE << order;
+       }
+       rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
+       if (rc)
+               free_pages((unsigned long)vstart, order);
+
+       return rc;
+}
+
+int
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+{
+       unsigned long i, bytes;
+       unsigned char *v_overflow_buffer;
+
+       bytes = nslabs << IO_TLB_SHIFT;
+
+       io_tlb_nslabs = nslabs;
+       io_tlb_start = virt_to_phys(tlb);
+       io_tlb_end = io_tlb_start + bytes;
+
+       set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
+       memset(tlb, 0, bytes);
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                    get_order(io_tlb_overflow));
+       if (!v_overflow_buffer)
+               goto cleanup2;
+
+       set_memory_decrypted((unsigned long)v_overflow_buffer,
+                       io_tlb_overflow >> PAGE_SHIFT);
+       memset(v_overflow_buffer, 0, io_tlb_overflow);
+       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                     get_order(io_tlb_nslabs * sizeof(int)));
+       if (!io_tlb_list)
+               goto cleanup3;
+
+       io_tlb_orig_addr = (phys_addr_t *)
+               __get_free_pages(GFP_KERNEL,
+                                get_order(io_tlb_nslabs *
+                                          sizeof(phys_addr_t)));
+       if (!io_tlb_orig_addr)
+               goto cleanup4;
+
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
+
+       swiotlb_print_info();
+
+       late_alloc = 1;
+
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
+       return 0;
+
+cleanup4:
+       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                        sizeof(int)));
+       io_tlb_list = NULL;
+cleanup3:
+       free_pages((unsigned long)v_overflow_buffer,
+                  get_order(io_tlb_overflow));
+       io_tlb_overflow_buffer = 0;
+cleanup2:
+       io_tlb_end = 0;
+       io_tlb_start = 0;
+       io_tlb_nslabs = 0;
+       max_segment = 0;
+       return -ENOMEM;
+}
+
+void __init swiotlb_exit(void)
+{
+       if (!io_tlb_orig_addr)
+               return;
+
+       if (late_alloc) {
+               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
+                          get_order(io_tlb_overflow));
+               free_pages((unsigned long)io_tlb_orig_addr,
+                          get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+               free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                                sizeof(int)));
+               free_pages((unsigned long)phys_to_virt(io_tlb_start),
+                          get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+       } else {
+               memblock_free_late(io_tlb_overflow_buffer,
+                                  PAGE_ALIGN(io_tlb_overflow));
+               memblock_free_late(__pa(io_tlb_orig_addr),
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+               memblock_free_late(__pa(io_tlb_list),
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+               memblock_free_late(io_tlb_start,
+                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+       }
+       io_tlb_nslabs = 0;
+       max_segment = 0;
+}
+
+int is_swiotlb_buffer(phys_addr_t paddr)
+{
+       return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+/*
+ * Bounce: copy the swiotlb buffer from or back to the original dma location
+ */
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+                          size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(orig_addr);
+       unsigned char *vaddr = phys_to_virt(tlb_addr);
+
+       if (PageHighMem(pfn_to_page(pfn))) {
+               /* The buffer does not have a mapping.  Map it in and copy */
+               unsigned int offset = orig_addr & ~PAGE_MASK;
+               char *buffer;
+               unsigned int sz = 0;
+               unsigned long flags;
+
+               while (size) {
+                       sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+                       local_irq_save(flags);
+                       buffer = kmap_atomic(pfn_to_page(pfn));
+                       if (dir == DMA_TO_DEVICE)
+                               memcpy(vaddr, buffer + offset, sz);
+                       else
+                               memcpy(buffer + offset, vaddr, sz);
+                       kunmap_atomic(buffer);
+                       local_irq_restore(flags);
+
+                       size -= sz;
+                       pfn++;
+                       vaddr += sz;
+                       offset = 0;
+               }
+       } else if (dir == DMA_TO_DEVICE) {
+               memcpy(vaddr, phys_to_virt(orig_addr), size);
+       } else {
+               memcpy(phys_to_virt(orig_addr), vaddr, size);
+       }
+}
+
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                  dma_addr_t tbl_dma_addr,
+                                  phys_addr_t orig_addr, size_t size,
+                                  enum dma_data_direction dir,
+                                  unsigned long attrs)
+{
+       unsigned long flags;
+       phys_addr_t tlb_addr;
+       unsigned int nslots, stride, index, wrap;
+       int i;
+       unsigned long mask;
+       unsigned long offset_slots;
+       unsigned long max_slots;
+
+       if (no_iotlb_memory)
+               panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
+       if (mem_encrypt_active())
+               pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+                            sme_active() ? "SME" : "SEV");
+
+       mask = dma_get_seg_boundary(hwdev);
+
+       tbl_dma_addr &= mask;
+
+       offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
+       /*
+        * Carefully handle integer overflow which can occur when mask == ~0UL.
+        */
+       max_slots = mask + 1
+                   ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
+                   : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
+
+       /*
+        * For mappings greater than or equal to a page, we limit the stride
+        * (and hence alignment) to a page size.
+        */
+       nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       if (size >= PAGE_SIZE)
+               stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+       else
+               stride = 1;
+
+       BUG_ON(!nslots);
+
+       /*
+        * Find a suitable number of IO TLB entries that will fit this
+        * request and allocate a buffer from that IO TLB pool.
+        */
+       spin_lock_irqsave(&io_tlb_lock, flags);
+       index = ALIGN(io_tlb_index, stride);
+       if (index >= io_tlb_nslabs)
+               index = 0;
+       wrap = index;
+
+       do {
+               while (iommu_is_span_boundary(index, nslots, offset_slots,
+                                             max_slots)) {
+                       index += stride;
+                       if (index >= io_tlb_nslabs)
+                               index = 0;
+                       if (index == wrap)
+                               goto not_found;
+               }
+
+               /*
+                * If we find a slot that indicates we have 'nslots' number of
+                * contiguous buffers, we allocate the buffers from that slot
+                * and mark the entries as '0' indicating unavailable.
+                */
+               if (io_tlb_list[index] >= nslots) {
+                       int count = 0;
+
+                       for (i = index; i < (int) (index + nslots); i++)
+                               io_tlb_list[i] = 0;
+                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+                               io_tlb_list[i] = ++count;
+                       tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+                       /*
+                        * Update the indices to avoid searching in the next
+                        * round.
+                        */
+                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
+                                       ? (index + nslots) : 0);
+
+                       goto found;
+               }
+               index += stride;
+               if (index >= io_tlb_nslabs)
+                       index = 0;
+       } while (index != wrap);
+
+not_found:
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+               dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
+       return SWIOTLB_MAP_ERROR;
+found:
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+
+       /*
+        * Save away the mapping from the original address to the DMA address.
+        * This is needed when we sync the memory.  Then we sync the buffer if
+        * needed.
+        */
+       for (i = 0; i < nslots; i++)
+               io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+
+       return tlb_addr;
+}
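A worked pass through the slot arithmetic above, assuming the usual IO_TLB_SHIFT of 11 (2 KiB slabs) and 4 KiB pages: a 9000-byte mapping needs nslots = ALIGN(9000, 2048) >> 11 = 5 slabs; because the size is at least PAGE_SIZE, stride = 1 << (12 - 11) = 2, so only every second (page-aligned) index is probed. On success the five io_tlb_list entries are zeroed, and the free slots immediately before the block are renumbered so each still records how many free slabs follow it.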
+
+/*
+ * Allocates bounce buffer and returns its physical address.
+ */
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+          enum dma_data_direction dir, unsigned long attrs)
+{
+       dma_addr_t start_dma_addr;
+
+       if (swiotlb_force == SWIOTLB_NO_FORCE) {
+               dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+                                    &phys);
+               return SWIOTLB_MAP_ERROR;
+       }
+
+       start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
+       return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+                                     dir, attrs);
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+                             size_t size, enum dma_data_direction dir,
+                             unsigned long attrs)
+{
+       unsigned long flags;
+       int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+       /*
+        * First, sync the memory before unmapping the entry
+        */
+       if (orig_addr != INVALID_PHYS_ADDR &&
+           !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+
+       /*
+        * Return the buffer to the free list by setting the corresponding
+        * entries to indicate the number of contiguous entries available.
+        * While returning the entries to the free list, we merge the entries
+        * with slots below and above the pool being returned.
+        */
+       spin_lock_irqsave(&io_tlb_lock, flags);
+       {
+               count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+                        io_tlb_list[index + nslots] : 0);
+               /*
+                * Step 1: return the slots to the free list, merging the
+                * slots with the succeeding slots
+                */
+               for (i = index + nslots - 1; i >= index; i--) {
+                       io_tlb_list[i] = ++count;
+                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+               }
+               /*
+                * Step 2: merge the returned slots with the preceding slots,
+                * if available (non-zero)
+                */
+               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+                       io_tlb_list[i] = ++count;
+       }
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
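The merge above in concrete numbers: freeing slots 10..12 while slot 13 already heads a free run of 2 starts count at 2, so step 1 writes list[12] = 3, list[11] = 4, list[10] = 5; if slot 9 was free, step 2 continues with list[9] = 6, and so on back toward the IO_TLB_SEGSIZE boundary, restoring the invariant that each free slot records the length of the free run starting at it.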
+
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+                            size_t size, enum dma_data_direction dir,
+                            enum dma_sync_target target)
+{
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+       if (orig_addr == INVALID_PHYS_ADDR)
+               return;
+       orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
+
+       switch (target) {
+       case SYNC_FOR_CPU:
+               if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_FROM_DEVICE);
+               else
+                       BUG_ON(dir != DMA_TO_DEVICE);
+               break;
+       case SYNC_FOR_DEVICE:
+               if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_TO_DEVICE);
+               else
+                       BUG_ON(dir != DMA_FROM_DEVICE);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+               size_t size)
+{
+       u64 mask = DMA_BIT_MASK(32);
+
+       if (dev && dev->coherent_dma_mask)
+               mask = dev->coherent_dma_mask;
+       return addr + size - 1 <= mask;
+}
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               unsigned long attrs)
+{
+       phys_addr_t phys_addr;
+
+       if (swiotlb_force == SWIOTLB_NO_FORCE)
+               goto out_warn;
+
+       phys_addr = swiotlb_tbl_map_single(dev,
+                       __phys_to_dma(dev, io_tlb_start),
+                       0, size, DMA_FROM_DEVICE, attrs);
+       if (phys_addr == SWIOTLB_MAP_ERROR)
+               goto out_warn;
+
+       *dma_handle = __phys_to_dma(dev, phys_addr);
+       if (!dma_coherent_ok(dev, *dma_handle, size))
+               goto out_unmap;
+
+       memset(phys_to_virt(phys_addr), 0, size);
+       return phys_to_virt(phys_addr);
+
+out_unmap:
+       dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+               (unsigned long long)dev->coherent_dma_mask,
+               (unsigned long long)*dma_handle);
+
+       /*
+        * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+        * DMA_ATTR_SKIP_CPU_SYNC is optional.
+        */
+       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+                       DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+               dev_warn(dev,
+                       "swiotlb: coherent allocation failed, size=%zu\n",
+                       size);
+               dump_stack();
+       }
+       return NULL;
+}
+
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+               dma_addr_t dma_addr)
+{
+       phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
+
+       WARN_ON_ONCE(irqs_disabled());
+
+       if (!is_swiotlb_buffer(phys_addr))
+               return false;
+
+       /*
+        * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+        * DMA_ATTR_SKIP_CPU_SYNC is optional.
+        */
+       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+                                DMA_ATTR_SKIP_CPU_SYNC);
+       return true;
+}
+
+static void
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+            int do_panic)
+{
+       if (swiotlb_force == SWIOTLB_NO_FORCE)
+               return;
+
+       /*
+        * Ran out of IOMMU space for this operation. This is very bad.
+        * Unfortunately the drivers cannot handle this operation properly
+        * unless they check for dma_mapping_error (most don't).
+        * When the mapping is small enough return a static buffer to limit
+        * the damage, or panic when the transfer is too big.
+        */
+       dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+                           size);
+
+       if (size <= io_tlb_overflow || !do_panic)
+               return;
+
+       if (dir == DMA_BIDIRECTIONAL)
+               panic("DMA: Random memory could be DMA accessed\n");
+       if (dir == DMA_FROM_DEVICE)
+               panic("DMA: Random memory could be DMA written\n");
+       if (dir == DMA_TO_DEVICE)
+               panic("DMA: Random memory could be DMA read\n");
+}
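
Since the comment above observes that most drivers never perform the check, here is a minimal sketch of the pattern it asks for; dev, buf and len are assumed driver-side context, not part of this patch:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* bounce space exhausted: back off */
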
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
+ * physical address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_page or swiotlb_sync_single_for_cpu is performed.
+ */
+dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+                           unsigned long offset, size_t size,
+                           enum dma_data_direction dir,
+                           unsigned long attrs)
+{
+       phys_addr_t map, phys = page_to_phys(page) + offset;
+       dma_addr_t dev_addr = phys_to_dma(dev, phys);
+
+       BUG_ON(dir == DMA_NONE);
+       /*
+        * If the address happens to be in the device's DMA window,
+        * we can safely return the device addr and not worry about bounce
+        * buffering it.
+        */
+       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
+               return dev_addr;
+
+       trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
+       /* Oh well, have to allocate and map a bounce buffer. */
+       map = map_single(dev, phys, size, dir, attrs);
+       if (map == SWIOTLB_MAP_ERROR) {
+               swiotlb_full(dev, size, dir, 1);
+               return __phys_to_dma(dev, io_tlb_overflow_buffer);
+       }
+
+       dev_addr = __phys_to_dma(dev, map);
+
+       /* Ensure that the address returned is DMA'ble */
+       if (dma_capable(dev, dev_addr, size))
+               return dev_addr;
+
+       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+       swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+       return __phys_to_dma(dev, io_tlb_overflow_buffer);
+}
+
+/*
+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_page call.  All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+                        size_t size, enum dma_data_direction dir,
+                        unsigned long attrs)
+{
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+       BUG_ON(dir == DMA_NONE);
+
+       if (is_swiotlb_buffer(paddr)) {
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+               return;
+       }
+
+       if (dir != DMA_FROM_DEVICE)
+               return;
+
+       /*
+        * phys_to_virt doesn't work with highmem pages, but we could
+        * call dma_mark_clean() with a highmem page here. However, we
+        * are fine since dma_mark_clean() is a no-op on POWERPC. We can
+        * make dma_mark_clean() take a physical address if necessary.
+        */
+       dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+                       size_t size, enum dma_data_direction dir,
+                       unsigned long attrs)
+{
+       unmap_single(hwdev, dev_addr, size, dir, attrs);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to tear down the dma mapping, you must
+ * call this function before doing so.  At the next point you give the dma
+ * address back to the card, you must first perform a
+ * swiotlb_sync_single_for_device, and then the device again owns the buffer.
+ */
+static void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+                   size_t size, enum dma_data_direction dir,
+                   enum dma_sync_target target)
+{
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+       BUG_ON(dir == DMA_NONE);
+
+       if (is_swiotlb_buffer(paddr)) {
+               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+               return;
+       }
+
+       if (dir != DMA_FROM_DEVICE)
+               return;
+
+       dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+                           size_t size, enum dma_data_direction dir)
+{
+       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+                              size_t size, enum dma_data_direction dir)
+{
+       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
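
The ownership hand-off described in the comment block above maps onto the generic DMA API as follows; a hedged sketch, where dev, buf, len and the hypothetical inspect() helper come from the surrounding driver:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... the device fills the buffer ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	inspect(buf);			/* the CPU may now read the data */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* the device owns the buffer again */
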
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_page
+ * interface.  Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements.
+ *       (for example via virtual mapping capabilities)
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
+ * same here.
+ */
+int
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+                    enum dma_data_direction dir, unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(dir == DMA_NONE);
+
+       for_each_sg(sgl, sg, nelems, i) {
+               phys_addr_t paddr = sg_phys(sg);
+               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
+
+               if (swiotlb_force == SWIOTLB_FORCE ||
+                   !dma_capable(hwdev, dev_addr, sg->length)) {
+                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
+                                                    sg->length, dir, attrs);
+                       if (map == SWIOTLB_MAP_ERROR) {
+                               /* Don't panic here, we expect map_sg users
+                                  to do proper error handling. */
+                               swiotlb_full(hwdev, sg->length, dir, 0);
+                               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+                               swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+                                                      attrs);
+                               sg_dma_len(sgl) = 0;
+                               return 0;
+                       }
+                       sg->dma_address = __phys_to_dma(hwdev, map);
+               } else
+                       sg->dma_address = dev_addr;
+               sg_dma_len(sg) = sg->length;
+       }
+       return nelems;
+}
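
Caller-side, the contract spelled out in the comments means a driver must iterate over the returned count, which may be smaller than nents, and treat 0 as failure. A sketch, with queue_desc() a hypothetical hardware helper:

	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;		/* 0 means the whole mapping failed */
	for_each_sg(sgl, sg, count, i)
		queue_desc(sg_dma_address(sg), sg_dma_len(sg));
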
+
+/*
+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+void
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+                      int nelems, enum dma_data_direction dir,
+                      unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(dir == DMA_NONE);
+
+       for_each_sg(sgl, sg, nelems, i)
+               unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+                            attrs);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+static void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
+               int nelems, enum dma_data_direction dir,
+               enum dma_sync_target target)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               swiotlb_sync_single(hwdev, sg->dma_address,
+                                   sg_dma_len(sg), dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+                       int nelems, enum dma_data_direction dir)
+{
+       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+                          int nelems, enum dma_data_direction dir)
+{
+       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
+int
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+       return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
+}
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */
+int
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
+{
+       return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+}
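
On the driver side, the 24-bit example from the comment is expressed through the generic mask helpers, which consult this callback; a sketch under the usual probe() assumptions:

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
		return -EIO;		/* platform cannot satisfy a 24-bit mask */
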
+
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       void *vaddr;
+
+       /* temporary workaround: */
+       if (gfp & __GFP_NOWARN)
+               attrs |= DMA_ATTR_NO_WARN;
+
+       /*
+        * Don't print a warning when the first allocation attempt fails.
+        * swiotlb_alloc_buffer() will print a warning when the DMA memory
+        * allocation ultimately failed.
+        */
+       gfp |= __GFP_NOWARN;
+
+       vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       if (!vaddr)
+               vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
+       return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       if (!swiotlb_free_buffer(dev, size, dma_addr))
+               dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+       .mapping_error          = swiotlb_dma_mapping_error,
+       .alloc                  = swiotlb_alloc,
+       .free                   = swiotlb_free,
+       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
+       .map_sg                 = swiotlb_map_sg_attrs,
+       .unmap_sg               = swiotlb_unmap_sg_attrs,
+       .map_page               = swiotlb_map_page,
+       .unmap_page             = swiotlb_unmap_page,
+       .dma_supported          = dma_direct_supported,
+};
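
As a hedged sketch of how these ops get used: platform or bus code can route a device through this table with the generic set_dma_ops() helper; whether and when to do so is an architecture decision, not something this patch dictates:

	set_dma_ops(dev, &swiotlb_dma_ops);
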
diff --git a/kernel/dma/virt.c b/kernel/dma/virt.c
new file mode 100644 (file)
index 0000000..631ddec
--- /dev/null
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map to virtual addresses without flushing memory.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+static void *dma_virt_alloc(struct device *dev, size_t size,
+                           dma_addr_t *dma_handle, gfp_t gfp,
+                           unsigned long attrs)
+{
+       void *ret;
+
+       ret = (void *)__get_free_pages(gfp, get_order(size));
+       if (ret)
+               *dma_handle = (uintptr_t)ret;
+       return ret;
+}
+
+static void dma_virt_free(struct device *dev, size_t size,
+                         void *cpu_addr, dma_addr_t dma_addr,
+                         unsigned long attrs)
+{
+       free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction dir,
+                                   unsigned long attrs)
+{
+       return (uintptr_t)(page_address(page) + offset);
+}
+
+static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
+                          int nents, enum dma_data_direction dir,
+                          unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+               sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nents;
+}
+
+const struct dma_map_ops dma_virt_ops = {
+       .alloc                  = dma_virt_alloc,
+       .free                   = dma_virt_free,
+       .map_page               = dma_virt_map_page,
+       .map_sg                 = dma_virt_map_sg,
+};
+EXPORT_SYMBOL(dma_virt_ops);
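
dma_virt_ops exists for software-only "devices" whose DMA addresses are really just kernel virtual addresses; the soft RDMA drivers are the typical users. A minimal sketch of opting such a device in, with sdev an assumed device pointer:

	set_dma_ops(&sdev->dev, &dma_virt_ops);	/* handles are plain vaddrs */
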
index 705c236..d9706da 100644 (file)
@@ -455,8 +455,9 @@ struct kobject *power_kobj;
  * state - control system sleep states.
  *
  * show() returns available sleep state labels, which may be "mem", "standby",
- * "freeze" and "disk" (hibernation).  See Documentation/power/states.txt for a
- * description of what they mean.
+ * "freeze" and "disk" (hibernation).
+ * See Documentation/admin-guide/pm/sleep-states.rst for a description of
+ * what they mean.
  *
  * store() accepts one of those strings, translates it into the proper
  * enumerated value, and initiates a suspend transition.
index dd6c0a2..dcc0166 100644 (file)
@@ -12,22 +12,22 @@ config NOP_TRACER
 config HAVE_FTRACE_NMI_ENTER
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_FUNCTION_TRACER
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_FUNCTION_GRAPH_TRACER
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_DYNAMIC_FTRACE
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_DYNAMIC_FTRACE_WITH_REGS
        bool
@@ -35,12 +35,12 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
 config HAVE_FTRACE_MCOUNT_RECORD
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_SYSCALL_TRACEPOINTS
        bool
        help
-         See Documentation/trace/ftrace-design.txt
+         See Documentation/trace/ftrace-design.rst
 
 config HAVE_FENTRY
        bool
@@ -448,7 +448,7 @@ config KPROBE_EVENTS
        help
          This allows the user to add tracing events (similar to tracepoints)
          on the fly via the ftrace interface. See
-         Documentation/trace/kprobetrace.txt for more details.
+         Documentation/trace/kprobetrace.rst for more details.
 
          Those events can be inserted wherever kprobes can probe, and record
          various register and memory values.
@@ -575,7 +575,7 @@ config MMIOTRACE
          implementation and works via page faults. Tracing is disabled by
          default and can be enabled at run-time.
 
-         See Documentation/trace/mmiotrace.txt.
+         See Documentation/trace/mmiotrace.rst.
          If you are not helping to develop drivers, say N.
 
 config TRACING_MAP
index 809fdd1..706836e 100644 (file)
@@ -405,7 +405,7 @@ config ASSOCIATIVE_ARRAY
 
          See:
 
-               Documentation/assoc_array.txt
+               Documentation/core-api/assoc_array.rst
 
          for more information.
 
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
-config HAS_DMA
-       bool
-       depends on !NO_DMA
-       default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
        bool
        default n
 
-config NEED_SG_DMA_LENGTH
-       bool
-
-config NEED_DMA_MAP_STATE
-       bool
-
-config ARCH_DMA_ADDR_T_64BIT
-       def_bool 64BIT || PHYS_ADDR_T_64BIT
-
 config IOMMU_HELPER
        bool
 
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-       bool
-       select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-       bool
-       depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-       bool
-       depends on HAS_DMA
-
-config SWIOTLB
-       bool
-       select DMA_DIRECT_OPS
-       select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
        bool
 
index 956b320..8153fda 100644 (file)
@@ -29,9 +29,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -148,7 +145,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +165,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
deleted file mode 100644 (file)
index c007d25..0000000
+++ /dev/null
@@ -1,1773 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <linux/sched/task_stack.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched/task.h>
-#include <linux/stacktrace.h>
-#include <linux/dma-debug.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/sections.h>
-
-#define HASH_SIZE       1024ULL
-#define HASH_FN_SHIFT   13
-#define HASH_FN_MASK    (HASH_SIZE - 1)
-
-/* allow architectures to override this if absolutely required */
-#ifndef PREALLOC_DMA_DEBUG_ENTRIES
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-#endif
-
-enum {
-       dma_debug_single,
-       dma_debug_page,
-       dma_debug_sg,
-       dma_debug_coherent,
-       dma_debug_resource,
-};
-
-enum map_err_types {
-       MAP_ERR_CHECK_NOT_APPLICABLE,
-       MAP_ERR_NOT_CHECKED,
-       MAP_ERR_CHECKED,
-};
-
-#define DMA_DEBUG_STACKTRACE_ENTRIES 5
-
-/**
- * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
- * @list: node on pre-allocated free_entries list
- * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
- * @size: length of the mapping
- * @direction: enum dma_data_direction
- * @sg_call_ents: 'nents' from dma_map_sg
- * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
- */
-struct dma_debug_entry {
-       struct list_head list;
-       struct device    *dev;
-       int              type;
-       unsigned long    pfn;
-       size_t           offset;
-       u64              dev_addr;
-       u64              size;
-       int              direction;
-       int              sg_call_ents;
-       int              sg_mapped_ents;
-       enum map_err_types  map_err_type;
-#ifdef CONFIG_STACKTRACE
-       struct           stack_trace stacktrace;
-       unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
-#endif
-};
-
-typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
-
-struct hash_bucket {
-       struct list_head list;
-       spinlock_t lock;
-} ____cacheline_aligned_in_smp;
-
-/* Hash list to save the allocated dma addresses */
-static struct hash_bucket dma_entry_hash[HASH_SIZE];
-/* List of pre-allocated dma_debug_entry's */
-static LIST_HEAD(free_entries);
-/* Lock for the list above */
-static DEFINE_SPINLOCK(free_entries_lock);
-
-/* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
-
-/* Early initialization disable flag, set at the end of dma_debug_init */
-static bool dma_debug_initialized __read_mostly;
-
-static inline bool dma_debug_disabled(void)
-{
-       return global_disable || !dma_debug_initialized;
-}
-
-/* Global error count */
-static u32 error_count;
-
-/* Global error show enable */
-static u32 show_all_errors __read_mostly;
-/* Number of errors to show */
-static u32 show_num_errors = 1;
-
-static u32 num_free_entries;
-static u32 min_free_entries;
-static u32 nr_total_entries;
-
-/* number of preallocated entries requested by kernel cmdline */
-static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-
-/* debugfs dentry's for the stuff above */
-static struct dentry *dma_debug_dent        __read_mostly;
-static struct dentry *global_disable_dent   __read_mostly;
-static struct dentry *error_count_dent      __read_mostly;
-static struct dentry *show_all_errors_dent  __read_mostly;
-static struct dentry *show_num_errors_dent  __read_mostly;
-static struct dentry *num_free_entries_dent __read_mostly;
-static struct dentry *min_free_entries_dent __read_mostly;
-static struct dentry *filter_dent           __read_mostly;
-
-/* per-driver filter related state */
-
-#define NAME_MAX_LEN   64
-
-static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
-static struct device_driver *current_driver                    __read_mostly;
-
-static DEFINE_RWLOCK(driver_name_lock);
-
-static const char *const maperr2str[] = {
-       [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
-       [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
-       [MAP_ERR_CHECKED] = "dma map error checked",
-};
-
-static const char *type2name[5] = { "single", "page",
-                                   "scather-gather", "coherent",
-                                   "resource" };
-
-static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
-                                  "DMA_FROM_DEVICE", "DMA_NONE" };
-
-/*
- * The access to some variables in this macro is racy. We can't use atomic_t
- * here because all these variables are exported to debugfs. Some are even
- * writeable. This is also the reason why a lock won't help much. But anyway,
- * the races are no big deal. Here is why:
- *
- *   error_count: the addition is racy, but the worst thing that can happen is
- *                that we don't count some errors
- *   show_num_errors: the subtraction is racy. Also no big deal because in
- *                    worst case this will result in one warning more in the
- *                    system log than the user configured. This variable is
- *                    writeable via debugfs.
- */
-static inline void dump_entry_trace(struct dma_debug_entry *entry)
-{
-#ifdef CONFIG_STACKTRACE
-       if (entry) {
-               pr_warning("Mapped at:\n");
-               print_stack_trace(&entry->stacktrace, 0);
-       }
-#endif
-}
-
-static bool driver_filter(struct device *dev)
-{
-       struct device_driver *drv;
-       unsigned long flags;
-       bool ret;
-
-       /* driver filter off */
-       if (likely(!current_driver_name[0]))
-               return true;
-
-       /* driver filter on and initialized */
-       if (current_driver && dev && dev->driver == current_driver)
-               return true;
-
-       /* driver filter on, but we can't filter on a NULL device... */
-       if (!dev)
-               return false;
-
-       if (current_driver || !current_driver_name[0])
-               return false;
-
-       /* driver filter on but not yet initialized */
-       drv = dev->driver;
-       if (!drv)
-               return false;
-
-       /* lock to protect against change of current_driver_name */
-       read_lock_irqsave(&driver_name_lock, flags);
-
-       ret = false;
-       if (drv->name &&
-           strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
-               current_driver = drv;
-               ret = true;
-       }
-
-       read_unlock_irqrestore(&driver_name_lock, flags);
-
-       return ret;
-}
-
-#define err_printk(dev, entry, format, arg...) do {                    \
-               error_count += 1;                                       \
-               if (driver_filter(dev) &&                               \
-                   (show_all_errors || show_num_errors > 0)) {         \
-                       WARN(1, "%s %s: " format,                       \
-                            dev ? dev_driver_string(dev) : "NULL",     \
-                            dev ? dev_name(dev) : "NULL", ## arg);     \
-                       dump_entry_trace(entry);                        \
-               }                                                       \
-               if (!show_all_errors && show_num_errors > 0)            \
-                       show_num_errors -= 1;                           \
-       } while (0);
-
-/*
- * Hash related functions
- *
- * Every DMA-API request is saved into a struct dma_debug_entry. To
- * have quick access to these structs they are stored into a hash.
- */
-static int hash_fn(struct dma_debug_entry *entry)
-{
-       /*
-        * Hash function is based on the dma address.
-        * We use bits 13-22 (per HASH_FN_SHIFT and HASH_FN_MASK) here as
-        * the index into the hash
-        */
-       return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
-}
-
-/*
- * Request exclusive access to a hash bucket for a given dma_debug_entry.
- */
-static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
-                                          unsigned long *flags)
-       __acquires(&dma_entry_hash[idx].lock)
-{
-       int idx = hash_fn(entry);
-       unsigned long __flags;
-
-       spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
-       *flags = __flags;
-       return &dma_entry_hash[idx];
-}
-
-/*
- * Give up exclusive access to the hash bucket
- */
-static void put_hash_bucket(struct hash_bucket *bucket,
-                           unsigned long *flags)
-       __releases(&bucket->lock)
-{
-       unsigned long __flags = *flags;
-
-       spin_unlock_irqrestore(&bucket->lock, __flags);
-}
-
-static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
-{
-       return ((a->dev_addr == b->dev_addr) &&
-               (a->dev == b->dev)) ? true : false;
-}
-
-static bool containing_match(struct dma_debug_entry *a,
-                            struct dma_debug_entry *b)
-{
-       if (a->dev != b->dev)
-               return false;
-
-       if ((b->dev_addr <= a->dev_addr) &&
-           ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
-               return true;
-
-       return false;
-}
-
-/*
- * Search a given entry in the hash bucket list
- */
-static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
-                                                 struct dma_debug_entry *ref,
-                                                 match_fn match)
-{
-       struct dma_debug_entry *entry, *ret = NULL;
-       int matches = 0, match_lvl, last_lvl = -1;
-
-       list_for_each_entry(entry, &bucket->list, list) {
-               if (!match(ref, entry))
-                       continue;
-
-               /*
-                * Some drivers map the same physical address multiple
-                * times. Without a hardware IOMMU this results in the
-                * same device addresses being put into the dma-debug
-                * hash multiple times too. This can result in false
-                * positives being reported. Therefore we implement a
-                * best-fit algorithm here which returns the entry from
-                * the hash which fits best to the reference value
-                * instead of the first-fit.
-                */
-               matches += 1;
-               match_lvl = 0;
-               entry->size         == ref->size         ? ++match_lvl : 0;
-               entry->type         == ref->type         ? ++match_lvl : 0;
-               entry->direction    == ref->direction    ? ++match_lvl : 0;
-               entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
-
-               if (match_lvl == 4) {
-                       /* perfect-fit - return the result */
-                       return entry;
-               } else if (match_lvl > last_lvl) {
-                       /*
-                        * We found an entry that fits better than the
-                        * previous one or it is the 1st match.
-                        */
-                       last_lvl = match_lvl;
-                       ret      = entry;
-               }
-       }
-
-       /*
-        * If we have multiple matches but no perfect-fit, just return
-        * NULL.
-        */
-       ret = (matches == 1) ? ret : NULL;
-
-       return ret;
-}
-
-static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
-                                                struct dma_debug_entry *ref)
-{
-       return __hash_bucket_find(bucket, ref, exact_match);
-}
-
-static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
-                                                  struct dma_debug_entry *ref,
-                                                  unsigned long *flags)
-{
-
-       unsigned int max_range = dma_get_max_seg_size(ref->dev);
-       struct dma_debug_entry *entry, index = *ref;
-       unsigned int range = 0;
-
-       while (range <= max_range) {
-               entry = __hash_bucket_find(*bucket, ref, containing_match);
-
-               if (entry)
-                       return entry;
-
-               /*
-                * Nothing found, go back a hash bucket
-                */
-               put_hash_bucket(*bucket, flags);
-               range          += (1 << HASH_FN_SHIFT);
-               index.dev_addr -= (1 << HASH_FN_SHIFT);
-               *bucket = get_hash_bucket(&index, flags);
-       }
-
-       return NULL;
-}
-
-/*
- * Add an entry to a hash bucket
- */
-static void hash_bucket_add(struct hash_bucket *bucket,
-                           struct dma_debug_entry *entry)
-{
-       list_add_tail(&entry->list, &bucket->list);
-}
-
-/*
- * Remove entry from a hash bucket list
- */
-static void hash_bucket_del(struct dma_debug_entry *entry)
-{
-       list_del(&entry->list);
-}
-
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
-       if (entry->type == dma_debug_resource)
-               return __pfn_to_phys(entry->pfn) + entry->offset;
-
-       return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
-/*
- * Dump mapping entries for debugging purposes
- */
-void debug_dma_dump_mappings(struct device *dev)
-{
-       int idx;
-
-       for (idx = 0; idx < HASH_SIZE; idx++) {
-               struct hash_bucket *bucket = &dma_entry_hash[idx];
-               struct dma_debug_entry *entry;
-               unsigned long flags;
-
-               spin_lock_irqsave(&bucket->lock, flags);
-
-               list_for_each_entry(entry, &bucket->list, list) {
-                       if (!dev || dev == entry->dev) {
-                               dev_info(entry->dev,
-                                        "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
-                                        type2name[entry->type], idx,
-                                        phys_addr(entry), entry->pfn,
-                                        entry->dev_addr, entry->size,
-                                        dir2name[entry->direction],
-                                        maperr2str[entry->map_err_type]);
-                       }
-               }
-
-               spin_unlock_irqrestore(&bucket->lock, flags);
-       }
-}
-
-/*
- * For each mapping (initial cacheline in the case of
- * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
- * scatterlist, or the cacheline specified in dma_map_single) insert
- * into this tree using the cacheline as the key. At
- * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
- * the entry already exists at insertion time add a tag as a reference
- * count for the overlapping mappings.  For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the
- * cacheline idle, but we should also be flagging overlaps as an API
- * violation.
- *
- * Memory usage is mostly constrained by the maximum number of available
- * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree.  In the case of dma_map_page and
- * dma_alloc_coherent there is only one dma_debug_entry and one
- * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
- * other hand, consumes a single dma_debug_entry, but inserts 'nents'
- * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
- */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
-static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
-#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
-#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-
-static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
-{
-       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
-               (entry->offset >> L1_CACHE_SHIFT);
-}
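
A worked example of the numbering, assuming PAGE_SHIFT = 12 and L1_CACHE_SHIFT = 6 (64 cachelines per page): an entry with pfn 5 and offset 0x80 yields cln = 5 * 64 + (0x80 >> 6) = 322.
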
-
-static int active_cacheline_read_overlap(phys_addr_t cln)
-{
-       int overlap = 0, i;
-
-       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
-                       overlap |= 1 << i;
-       return overlap;
-}
-
-static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
-{
-       int i;
-
-       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
-               return overlap;
-
-       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (overlap & 1 << i)
-                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
-               else
-                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
-
-       return overlap;
-}
-
-static void active_cacheline_inc_overlap(phys_addr_t cln)
-{
-       int overlap = active_cacheline_read_overlap(cln);
-
-       overlap = active_cacheline_set_overlap(cln, ++overlap);
-
-       /* If we overflowed the overlap counter then we're potentially
-        * leaking dma-mappings.  Otherwise, if maps and unmaps are
-        * balanced then this overflow may cause false negatives in
-        * debug_dma_assert_idle() as the cacheline may be marked idle
-        * prematurely.
-        */
-       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
-                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
-                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
-}
-
-static int active_cacheline_dec_overlap(phys_addr_t cln)
-{
-       int overlap = active_cacheline_read_overlap(cln);
-
-       return active_cacheline_set_overlap(cln, --overlap);
-}
-
-static int active_cacheline_insert(struct dma_debug_entry *entry)
-{
-       phys_addr_t cln = to_cacheline_number(entry);
-       unsigned long flags;
-       int rc;
-
-       /* If the device is not writing memory then we don't have any
-        * concerns about the cpu consuming stale data.  This mitigates
-        * legitimate usages of overlapping mappings.
-        */
-       if (entry->direction == DMA_TO_DEVICE)
-               return 0;
-
-       spin_lock_irqsave(&radix_lock, flags);
-       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
-       if (rc == -EEXIST)
-               active_cacheline_inc_overlap(cln);
-       spin_unlock_irqrestore(&radix_lock, flags);
-
-       return rc;
-}
-
-static void active_cacheline_remove(struct dma_debug_entry *entry)
-{
-       phys_addr_t cln = to_cacheline_number(entry);
-       unsigned long flags;
-
-       /* ...mirror the insert case */
-       if (entry->direction == DMA_TO_DEVICE)
-               return;
-
-       spin_lock_irqsave(&radix_lock, flags);
-       /* since we are counting overlaps the final put of the
-        * cacheline will occur when the overlap count is 0.
-        * active_cacheline_dec_overlap() returns -1 in that case
-        */
-       if (active_cacheline_dec_overlap(cln) < 0)
-               radix_tree_delete(&dma_active_cacheline, cln);
-       spin_unlock_irqrestore(&radix_lock, flags);
-}
-
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
-       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
-       struct dma_debug_entry *entry = NULL;
-       void **results = (void **) &ents;
-       unsigned int nents, i;
-       unsigned long flags;
-       phys_addr_t cln;
-
-       if (dma_debug_disabled())
-               return;
-
-       if (!page)
-               return;
-
-       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
-       spin_lock_irqsave(&radix_lock, flags);
-       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
-                                      CACHELINES_PER_PAGE);
-       for (i = 0; i < nents; i++) {
-               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
-
-               if (ent_cln == cln) {
-                       entry = ents[i];
-                       break;
-               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
-                       break;
-       }
-       spin_unlock_irqrestore(&radix_lock, flags);
-
-       if (!entry)
-               return;
-
-       cln = to_cacheline_number(entry);
-       err_printk(entry->dev, entry,
-                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
-                  &cln);
-}
-
-/*
- * Wrapper function for adding an entry to the hash.
- * This function takes care of locking itself.
- */
-static void add_dma_entry(struct dma_debug_entry *entry)
-{
-       struct hash_bucket *bucket;
-       unsigned long flags;
-       int rc;
-
-       bucket = get_hash_bucket(entry, &flags);
-       hash_bucket_add(bucket, entry);
-       put_hash_bucket(bucket, &flags);
-
-       rc = active_cacheline_insert(entry);
-       if (rc == -ENOMEM) {
-               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
-               global_disable = true;
-       }
-
-       /* TODO: report -EEXIST errors here as overlapping mappings are
-        * not supported by the DMA API
-        */
-}
-
-static struct dma_debug_entry *__dma_entry_alloc(void)
-{
-       struct dma_debug_entry *entry;
-
-       entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-       list_del(&entry->list);
-       memset(entry, 0, sizeof(*entry));
-
-       num_free_entries -= 1;
-       if (num_free_entries < min_free_entries)
-               min_free_entries = num_free_entries;
-
-       return entry;
-}
-
-/* struct dma_entry allocator
- *
- * The next two functions implement the allocator for
- * struct dma_debug_entries.
- */
-static struct dma_debug_entry *dma_entry_alloc(void)
-{
-       struct dma_debug_entry *entry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&free_entries_lock, flags);
-
-       if (list_empty(&free_entries)) {
-               global_disable = true;
-               spin_unlock_irqrestore(&free_entries_lock, flags);
-               pr_err("DMA-API: debugging out of memory - disabling\n");
-               return NULL;
-       }
-
-       entry = __dma_entry_alloc();
-
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
-#ifdef CONFIG_STACKTRACE
-       entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
-       entry->stacktrace.entries = entry->st_entries;
-       entry->stacktrace.skip = 2;
-       save_stack_trace(&entry->stacktrace);
-#endif
-
-       return entry;
-}
-
-static void dma_entry_free(struct dma_debug_entry *entry)
-{
-       unsigned long flags;
-
-       active_cacheline_remove(entry);
-
-       /*
-        * add to beginning of the list - this way the entries are
-        * more likely cache hot when they are reallocated.
-        */
-       spin_lock_irqsave(&free_entries_lock, flags);
-       list_add(&entry->list, &free_entries);
-       num_free_entries += 1;
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-}
-
-int dma_debug_resize_entries(u32 num_entries)
-{
-       int i, delta, ret = 0;
-       unsigned long flags;
-       struct dma_debug_entry *entry;
-       LIST_HEAD(tmp);
-
-       spin_lock_irqsave(&free_entries_lock, flags);
-
-       if (nr_total_entries < num_entries) {
-               delta = num_entries - nr_total_entries;
-
-               spin_unlock_irqrestore(&free_entries_lock, flags);
-
-               for (i = 0; i < delta; i++) {
-                       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-                       if (!entry)
-                               break;
-
-                       list_add_tail(&entry->list, &tmp);
-               }
-
-               spin_lock_irqsave(&free_entries_lock, flags);
-
-               list_splice(&tmp, &free_entries);
-               nr_total_entries += i;
-               num_free_entries += i;
-       } else {
-               delta = nr_total_entries - num_entries;
-
-               for (i = 0; i < delta && !list_empty(&free_entries); i++) {
-                       entry = __dma_entry_alloc();
-                       kfree(entry);
-               }
-
-               nr_total_entries -= i;
-       }
-
-       if (nr_total_entries != num_entries)
-               ret = 1;
-
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
-       return ret;
-}
-
-/*
- * DMA-API debugging init code
- *
- * The init code does two things:
- *   1. Initialize core data structures
- *   2. Preallocate a given number of dma_debug_entry structs
- */
-
-static int prealloc_memory(u32 num_entries)
-{
-       struct dma_debug_entry *entry, *next_entry;
-       int i;
-
-       for (i = 0; i < num_entries; ++i) {
-               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-               if (!entry)
-                       goto out_err;
-
-               list_add_tail(&entry->list, &free_entries);
-       }
-
-       num_free_entries = num_entries;
-       min_free_entries = num_entries;
-
-       pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
-
-       return 0;
-
-out_err:
-
-       list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-
-       return -ENOMEM;
-}
-
-static ssize_t filter_read(struct file *file, char __user *user_buf,
-                          size_t count, loff_t *ppos)
-{
-       char buf[NAME_MAX_LEN + 1];
-       unsigned long flags;
-       int len;
-
-       if (!current_driver_name[0])
-               return 0;
-
-       /*
-        * We can't copy to userspace directly because current_driver_name can
-        * only be read under the driver_name_lock with irqs disabled. So
-        * create a temporary copy first.
-        */
-       read_lock_irqsave(&driver_name_lock, flags);
-       len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
-       read_unlock_irqrestore(&driver_name_lock, flags);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t filter_write(struct file *file, const char __user *userbuf,
-                           size_t count, loff_t *ppos)
-{
-       char buf[NAME_MAX_LEN];
-       unsigned long flags;
-       size_t len;
-       int i;
-
-       /*
-        * We can't copy from userspace directly. Access to
-        * current_driver_name is protected with a write_lock with irqs
-        * disabled. Since copy_from_user can fault and may sleep we
-        * need to copy to a temporary buffer first
-        */
-       len = min(count, (size_t)(NAME_MAX_LEN - 1));
-       if (copy_from_user(buf, userbuf, len))
-               return -EFAULT;
-
-       buf[len] = 0;
-
-       write_lock_irqsave(&driver_name_lock, flags);
-
-       /*
-        * Now handle the string we got from userspace very carefully.
-        * The rules are:
-        *         - only use the first token we got
-        *         - token delimiter is everything looking like a space
-        *           character (' ', '\n', '\t' ...)
-        *
-        */
-       if (!isalnum(buf[0])) {
-               /*
-                * If the first character userspace gave us is not
-                * alphanumerical then assume the filter should be
-                * switched off.
-                */
-               if (current_driver_name[0])
-                       pr_info("DMA-API: switching off dma-debug driver filter\n");
-               current_driver_name[0] = 0;
-               current_driver = NULL;
-               goto out_unlock;
-       }
-
-       /*
-        * Now parse out the first token and use it as the name for the
-        * driver to filter for.
-        */
-       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
-               current_driver_name[i] = buf[i];
-               if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
-                       break;
-       }
-       current_driver_name[i] = 0;
-       current_driver = NULL;
-
-       pr_info("DMA-API: enable driver filter for driver [%s]\n",
-               current_driver_name);
-
-out_unlock:
-       write_unlock_irqrestore(&driver_name_lock, flags);
-
-       return count;
-}
-
-static const struct file_operations filter_fops = {
-       .read  = filter_read,
-       .write = filter_write,
-       .llseek = default_llseek,
-};
-
-static int dma_debug_fs_init(void)
-{
-       dma_debug_dent = debugfs_create_dir("dma-api", NULL);
-       if (!dma_debug_dent) {
-               pr_err("DMA-API: can not create debugfs directory\n");
-               return -ENOMEM;
-       }
-
-       global_disable_dent = debugfs_create_bool("disabled", 0444,
-                       dma_debug_dent,
-                       &global_disable);
-       if (!global_disable_dent)
-               goto out_err;
-
-       error_count_dent = debugfs_create_u32("error_count", 0444,
-                       dma_debug_dent, &error_count);
-       if (!error_count_dent)
-               goto out_err;
-
-       show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
-                       dma_debug_dent,
-                       &show_all_errors);
-       if (!show_all_errors_dent)
-               goto out_err;
-
-       show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
-                       dma_debug_dent,
-                       &show_num_errors);
-       if (!show_num_errors_dent)
-               goto out_err;
-
-       num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
-                       dma_debug_dent,
-                       &num_free_entries);
-       if (!num_free_entries_dent)
-               goto out_err;
-
-       min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
-                       dma_debug_dent,
-                       &min_free_entries);
-       if (!min_free_entries_dent)
-               goto out_err;
-
-       filter_dent = debugfs_create_file("driver_filter", 0644,
-                                         dma_debug_dent, NULL, &filter_fops);
-       if (!filter_dent)
-               goto out_err;
-
-       return 0;
-
-out_err:
-       debugfs_remove_recursive(dma_debug_dent);
-
-       return -ENOMEM;
-}
-
-static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
-{
-       struct dma_debug_entry *entry;
-       unsigned long flags;
-       int count = 0, i;
-
-       for (i = 0; i < HASH_SIZE; ++i) {
-               spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
-               list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-                       if (entry->dev == dev) {
-                               count += 1;
-                               *out_entry = entry;
-                       }
-               }
-               spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
-       }
-
-       return count;
-}
-
-static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
-{
-       struct device *dev = data;
-       struct dma_debug_entry *uninitialized_var(entry);
-       int count;
-
-       if (dma_debug_disabled())
-               return 0;
-
-       switch (action) {
-       case BUS_NOTIFY_UNBOUND_DRIVER:
-               count = device_dma_allocations(dev, &entry);
-               if (count == 0)
-                       break;
-               err_printk(dev, entry, "DMA-API: device driver has pending "
-                               "DMA allocations while released from device "
-                               "[count=%d]\n"
-                               "One of leaked entries details: "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [mapped as %s]\n",
-                       count, entry->dev_addr, entry->size,
-                       dir2name[entry->direction], type2name[entry->type]);
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-void dma_debug_add_bus(struct bus_type *bus)
-{
-       struct notifier_block *nb;
-
-       if (dma_debug_disabled())
-               return;
-
-       nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
-       if (nb == NULL) {
-               pr_err("dma_debug_add_bus: out of memory\n");
-               return;
-       }
-
-       nb->notifier_call = dma_debug_device_change;
-
-       bus_register_notifier(bus, nb);
-}
-
-static int dma_debug_init(void)
-{
-       int i;
-
-       /* Do not use dma_debug_initialized here, since we really want to be
-        * called to set dma_debug_initialized
-        */
-       if (global_disable)
-               return 0;
-
-       for (i = 0; i < HASH_SIZE; ++i) {
-               INIT_LIST_HEAD(&dma_entry_hash[i].list);
-               spin_lock_init(&dma_entry_hash[i].lock);
-       }
-
-       if (dma_debug_fs_init() != 0) {
-               pr_err("DMA-API: error creating debugfs entries - disabling\n");
-               global_disable = true;
-
-               return 0;
-       }
-
-       if (prealloc_memory(nr_prealloc_entries) != 0) {
-               pr_err("DMA-API: debugging out of memory error - disabled\n");
-               global_disable = true;
-
-               return 0;
-       }
-
-       nr_total_entries = num_free_entries;
-
-       dma_debug_initialized = true;
-
-       pr_info("DMA-API: debugging enabled by kernel config\n");
-       return 0;
-}
-core_initcall(dma_debug_init);
-
-static __init int dma_debug_cmdline(char *str)
-{
-       if (!str)
-               return -EINVAL;
-
-       if (strncmp(str, "off", 3) == 0) {
-               pr_info("DMA-API: debugging disabled on kernel command line\n");
-               global_disable = true;
-       }
-
-       return 0;
-}
-
-static __init int dma_debug_entries_cmdline(char *str)
-{
-       if (!str)
-               return -EINVAL;
-       if (!get_option(&str, &nr_prealloc_entries))
-               nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-       return 0;
-}
-
-__setup("dma_debug=", dma_debug_cmdline);
-__setup("dma_debug_entries=", dma_debug_entries_cmdline);
-
-static void check_unmap(struct dma_debug_entry *ref)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       bucket = get_hash_bucket(ref, &flags);
-       entry = bucket_find_exact(bucket, ref);
-
-       if (!entry) {
-               /* must drop lock before calling dma_mapping_error */
-               put_hash_bucket(bucket, &flags);
-
-               if (dma_mapping_error(ref->dev, ref->dev_addr)) {
-                       err_printk(ref->dev, NULL,
-                                  "DMA-API: device driver tries to free an "
-                                  "invalid DMA memory address\n");
-               } else {
-                       err_printk(ref->dev, NULL,
-                                  "DMA-API: device driver tries to free DMA "
-                                  "memory it has not allocated [device "
-                                  "address=0x%016llx] [size=%llu bytes]\n",
-                                  ref->dev_addr, ref->size);
-               }
-               return;
-       }
-
-       if (ref->size != entry->size) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different size "
-                          "[device address=0x%016llx] [map size=%llu bytes] "
-                          "[unmap size=%llu bytes]\n",
-                          ref->dev_addr, entry->size, ref->size);
-       }
-
-       if (ref->type != entry->type) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with wrong function "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped as %s] [unmapped as %s]\n",
-                          ref->dev_addr, ref->size,
-                          type2name[entry->type], type2name[ref->type]);
-       } else if ((entry->type == dma_debug_coherent) &&
-                  (phys_addr(ref) != phys_addr(entry))) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different CPU address "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[cpu alloc address=0x%016llx] "
-                          "[cpu free address=0x%016llx]",
-                          ref->dev_addr, ref->size,
-                          phys_addr(entry),
-                          phys_addr(ref));
-       }
-
-       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
-           ref->sg_call_ents != entry->sg_call_ents) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA sg list with different entry count "
-                          "[map count=%d] [unmap count=%d]\n",
-                          entry->sg_call_ents, ref->sg_call_ents);
-       }
-
-       /*
-        * This may not be a bug in reality - but most implementations of the
-        * DMA API don't handle this properly, so check for it here
-        */
-       if (ref->direction != entry->direction) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different direction "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped with %s] [unmapped with %s]\n",
-                          ref->dev_addr, ref->size,
-                          dir2name[entry->direction],
-                          dir2name[ref->direction]);
-       }
-
-       /*
-        * Drivers should use dma_mapping_error() to check the returned
-        * addresses of dma_map_single() and dma_map_page().
-        * If not, print this warning message. See Documentation/DMA-API.txt.
-        */
-       if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
-               err_printk(ref->dev, entry,
-                          "DMA-API: device driver failed to check map error"
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped as %s]",
-                          ref->dev_addr, ref->size,
-                          type2name[entry->type]);
-       }
-
-       hash_bucket_del(entry);
-       dma_entry_free(entry);
-
-       put_hash_bucket(bucket, &flags);
-}
-
-static void check_for_stack(struct device *dev,
-                           struct page *page, size_t offset)
-{
-       void *addr;
-       struct vm_struct *stack_vm_area = task_stack_vm_area(current);
-
-       if (!stack_vm_area) {
-               /* Stack is direct-mapped. */
-               if (PageHighMem(page))
-                       return;
-               addr = page_address(page) + offset;
-               if (object_is_on_stack(addr))
-                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
-       } else {
-               /* Stack is vmalloced. */
-               int i;
-
-               for (i = 0; i < stack_vm_area->nr_pages; i++) {
-                       if (page != stack_vm_area->pages[i])
-                               continue;
-
-                       addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
-                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
-                       break;
-               }
-       }
-}
-
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
-       unsigned long a1 = (unsigned long)addr;
-       unsigned long b1 = a1 + len;
-       unsigned long a2 = (unsigned long)start;
-       unsigned long b2 = (unsigned long)end;
-
-       return !(b1 <= a2 || a1 >= b2);
-}
-
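-/*
- * Worked example of the interval test above (illustrative numbers): for
- * addr = 0x1000 and len = 0x800 the mapped range is [0x1000, 0x1800);
- * checked against rodata at [0x1400, 0x2000) this yields
- * !(0x1800 <= 0x1400 || 0x1000 >= 0x2000) == true, i.e. overlap.
- */
-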
-static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
-{
-       if (overlap(addr, len, _stext, _etext) ||
-           overlap(addr, len, __start_rodata, __end_rodata))
-               err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
-}
-
-static void check_sync(struct device *dev,
-                      struct dma_debug_entry *ref,
-                      bool to_cpu)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       bucket = get_hash_bucket(ref, &flags);
-
-       entry = bucket_find_contain(&bucket, ref, &flags);
-
-       if (!entry) {
-               err_printk(dev, NULL, "DMA-API: device driver tries "
-                               "to sync DMA memory it has not allocated "
-                               "[device address=0x%016llx] [size=%llu bytes]\n",
-                               (unsigned long long)ref->dev_addr, ref->size);
-               goto out;
-       }
-
-       if (ref->size > entry->size) {
-               err_printk(dev, entry, "DMA-API: device driver syncs"
-                               " DMA memory outside allocated range "
-                               "[device address=0x%016llx] "
-                               "[allocation size=%llu bytes] "
-                               "[sync offset+size=%llu]\n",
-                               entry->dev_addr, entry->size,
-                               ref->size);
-       }
-
-       if (entry->direction == DMA_BIDIRECTIONAL)
-               goto out;
-
-       if (ref->direction != entry->direction) {
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "DMA memory with different direction "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-       }
-
-       if (to_cpu && entry->direction != DMA_FROM_DEVICE &&
-                     ref->direction != DMA_TO_DEVICE)
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "device read-only DMA memory for cpu "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-
-       if (!to_cpu && entry->direction != DMA_TO_DEVICE &&
-                      ref->direction != DMA_FROM_DEVICE)
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "device write-only DMA memory to device "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-
-       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
-           ref->sg_call_ents != entry->sg_call_ents) {
-               err_printk(ref->dev, entry, "DMA-API: device driver syncs "
-                          "DMA sg list with different entry count "
-                          "[map count=%d] [sync count=%d]\n",
-                          entry->sg_call_ents, ref->sg_call_ents);
-       }
-
-out:
-       put_hash_bucket(bucket, &flags);
-}
-
-static void check_sg_segment(struct device *dev, struct scatterlist *sg)
-{
-#ifdef CONFIG_DMA_API_DEBUG_SG
-       unsigned int max_seg = dma_get_max_seg_size(dev);
-       u64 start, end, boundary = dma_get_seg_boundary(dev);
-
-       /*
-        * Either the driver forgot to set dma_parms appropriately, or
-        * whoever generated the list forgot to check them.
-        */
-       if (sg->length > max_seg)
-               err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
-                          sg->length, max_seg);
-       /*
-        * In some cases this could potentially be the DMA API
-        * implementation's fault, but it would usually imply that
-        * the scatterlist was built inappropriately to begin with.
-        */
-       start = sg_dma_address(sg);
-       end = start + sg_dma_len(sg) - 1;
-       if ((start ^ end) & ~boundary)
-               err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
-                          start, end, boundary);
-#endif
-}
-
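-/*
- * Example of the boundary test (illustrative, assuming a 64K segment
- * boundary, i.e. boundary == 0xffff): a segment with start == 0xf000 and
- * DMA length 0x2000 ends at 0x10fff, so (start ^ end) & ~boundary is
- * nonzero and the segment is reported as crossing the boundary.
- */
-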
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                       size_t size, int direction, dma_addr_t dma_addr,
-                       bool map_single)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (dma_mapping_error(dev, dma_addr))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->dev       = dev;
-       entry->type      = dma_debug_page;
-       entry->pfn       = page_to_pfn(page);
-       entry->offset    = offset;
-       entry->dev_addr  = dma_addr;
-       entry->size      = size;
-       entry->direction = direction;
-       entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
-       if (map_single)
-               entry->type = dma_debug_single;
-
-       check_for_stack(dev, page, offset);
-
-       if (!PageHighMem(page)) {
-               void *addr = page_address(page) + offset;
-
-               check_for_illegal_area(dev, addr, size);
-       }
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_page);
-
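-/*
- * Illustrative driver-side idiom (not from this file): the entry above
- * stays in MAP_ERR_NOT_CHECKED state until the driver calls
- * dma_mapping_error(), so the expected pattern is
- *
- *     dma_addr_t addr = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
- *     if (dma_mapping_error(dev, addr))
- *             return -ENOMEM;
- *
- * Skipping that check triggers the "failed to check map error" report
- * in check_unmap() above.
- */
-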
-void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       struct dma_debug_entry ref;
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.dev = dev;
-       ref.dev_addr = dma_addr;
-       bucket = get_hash_bucket(&ref, &flags);
-
-       list_for_each_entry(entry, &bucket->list, list) {
-               if (!exact_match(&ref, entry))
-                       continue;
-
-               /*
-                * The same physical address can be mapped multiple
-                * times. Without a hardware IOMMU this results in the
-                * same device addresses being put into the dma-debug
-                * hash multiple times too. This can result in false
-                * positives being reported. Therefore we implement a
-                * best-fit algorithm here which updates the first entry
-                * from the hash which fits the reference value and is
-                * not currently listed as being checked.
-                */
-               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
-                       entry->map_err_type = MAP_ERR_CHECKED;
-                       break;
-               }
-       }
-
-       put_hash_bucket(bucket, &flags);
-}
-EXPORT_SYMBOL(debug_dma_mapping_error);
-
-void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-                         size_t size, int direction, bool map_single)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_page,
-               .dev            = dev,
-               .dev_addr       = addr,
-               .size           = size,
-               .direction      = direction,
-       };
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (map_single)
-               ref.type = dma_debug_single;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_page);
-
-void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                     int nents, int mapped_ents, int direction)
-{
-       struct dma_debug_entry *entry;
-       struct scatterlist *s;
-       int i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, mapped_ents, i) {
-               entry = dma_entry_alloc();
-               if (!entry)
-                       return;
-
-               entry->type           = dma_debug_sg;
-               entry->dev            = dev;
-               entry->pfn            = page_to_pfn(sg_page(s));
-               entry->offset         = s->offset;
-               entry->size           = sg_dma_len(s);
-               entry->dev_addr       = sg_dma_address(s);
-               entry->direction      = direction;
-               entry->sg_call_ents   = nents;
-               entry->sg_mapped_ents = mapped_ents;
-
-               check_for_stack(dev, sg_page(s), s->offset);
-
-               if (!PageHighMem(sg_page(s))) {
-                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-               }
-
-               check_sg_segment(dev, s);
-
-               add_dma_entry(entry);
-       }
-}
-EXPORT_SYMBOL(debug_dma_map_sg);
-
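-/*
- * Illustrative scatterlist idiom (not from this file): entries are added
- * per *mapped* segment above, while unmap must use the original nents:
- *
- *     int n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
- *     if (n == 0)
- *             return -EIO;
- *     ...
- *     dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
- *
- * The sg_call_ents/sg_mapped_ents bookkeeping exists to catch
- * mismatches in exactly this pairing.
- */
-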
-static int get_nr_mapped_entries(struct device *dev,
-                                struct dma_debug_entry *ref)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-       int mapped_ents;
-
-       bucket       = get_hash_bucket(ref, &flags);
-       entry        = bucket_find_exact(bucket, ref);
-       mapped_ents  = 0;
-
-       if (entry)
-               mapped_ents = entry->sg_mapped_ents;
-       put_hash_bucket(bucket, &flags);
-
-       return mapped_ents;
-}
-
-void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-                       int nelems, int dir)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sglist, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = dir,
-                       .sg_call_ents   = nelems,
-               };
-
-               if (mapped_ents && i >= mapped_ents)
-                       break;
-
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               check_unmap(&ref);
-       }
-}
-EXPORT_SYMBOL(debug_dma_unmap_sg);
-
-void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                             dma_addr_t dma_addr, void *virt)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (unlikely(virt == NULL))
-               return;
-
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->type      = dma_debug_coherent;
-       entry->dev       = dev;
-       entry->offset    = offset_in_page(virt);
-       entry->size      = size;
-       entry->dev_addr  = dma_addr;
-       entry->direction = DMA_BIDIRECTIONAL;
-
-       if (is_vmalloc_addr(virt))
-               entry->pfn = vmalloc_to_pfn(virt);
-       else
-               entry->pfn = page_to_pfn(virt_to_page(virt));
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
-
-void debug_dma_free_coherent(struct device *dev, size_t size,
-                        void *virt, dma_addr_t addr)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_coherent,
-               .dev            = dev,
-               .offset         = offset_in_page(virt),
-               .dev_addr       = addr,
-               .size           = size,
-               .direction      = DMA_BIDIRECTIONAL,
-       };
-
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
-               return;
-
-       if (is_vmalloc_addr(virt))
-               ref.pfn = vmalloc_to_pfn(virt);
-       else
-               ref.pfn = page_to_pfn(virt_to_page(virt));
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_free_coherent);
-
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                           int direction, dma_addr_t dma_addr)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->type             = dma_debug_resource;
-       entry->dev              = dev;
-       entry->pfn              = PHYS_PFN(addr);
-       entry->offset           = offset_in_page(addr);
-       entry->size             = size;
-       entry->dev_addr         = dma_addr;
-       entry->direction        = direction;
-       entry->map_err_type     = MAP_ERR_NOT_CHECKED;
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_resource);
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
-                             size_t size, int direction)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_resource,
-               .dev            = dev,
-               .dev_addr       = dma_addr,
-               .size           = size,
-               .direction      = direction,
-       };
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_resource);
-
-void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                                  size_t size, int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
-
-void debug_dma_sync_single_for_device(struct device *dev,
-                                     dma_addr_t dma_handle, size_t size,
-                                     int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_device);
-
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
-                                        dma_addr_t dma_handle,
-                                        unsigned long offset, size_t size,
-                                        int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = offset + size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
-                                           dma_addr_t dma_handle,
-                                           unsigned long offset,
-                                           size_t size, int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = offset + size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
-
-void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                              int nelems, int direction)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = direction,
-                       .sg_call_ents   = nelems,
-               };
-
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               if (i >= mapped_ents)
-                       break;
-
-               check_sync(dev, &ref, true);
-       }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
-
-void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                                 int nelems, int direction)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = direction,
-                       .sg_call_ents   = nelems,
-               };
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               if (i >= mapped_ents)
-                       break;
-
-               check_sync(dev, &ref, false);
-       }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
-
-static int __init dma_debug_driver_setup(char *str)
-{
-       int i;
-
-       for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
-               current_driver_name[i] = *str;
-               if (*str == 0)
-                       break;
-       }
-
-       if (current_driver_name[0])
-               pr_info("DMA-API: enable driver filter for driver [%s]\n",
-                       current_driver_name);
-
-       return 1;
-}
-__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
deleted file mode 100644 (file)
index 8be8106..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-#include <linux/set_memory.h>
-
-#define DIRECT_MAPPING_ERROR           0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-       return sev_active();
-}
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
-               const char *caller)
-{
-       if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
-               if (!dev->dma_mask) {
-                       dev_err(dev,
-                               "%s: call on device without dma_mask\n",
-                               caller);
-                       return false;
-               }
-
-               if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
-                       dev_err(dev,
-                               "%s: overflow %pad+%zu of device mask %llx\n",
-                               caller, &dma_addr, size, *dev->dma_mask);
-               }
-               return false;
-       }
-       return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
-       dma_addr_t addr = force_dma_unencrypted() ?
-               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
-       return addr + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       int page_order = get_order(size);
-       struct page *page = NULL;
-       void *ret;
-
-       /* we always manually zero the memory once we are done: */
-       gfp &= ~__GFP_ZERO;
-
-       /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
-       if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-               gfp |= GFP_DMA;
-       if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-               gfp |= GFP_DMA32;
-
-again:
-       /* CMA can be used only in the context which permits sleeping */
-       if (gfpflags_allow_blocking(gfp)) {
-               page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
-               if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                       dma_release_from_contiguous(dev, page, count);
-                       page = NULL;
-               }
-       }
-       if (!page)
-               page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
-       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-               __free_pages(page, page_order);
-               page = NULL;
-
-               if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-                   dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
-                   !(gfp & (GFP_DMA32 | GFP_DMA))) {
-                       gfp |= GFP_DMA32;
-                       goto again;
-               }
-
-               if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-                   dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-                   !(gfp & GFP_DMA)) {
-                       gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-                       goto again;
-               }
-       }
-
-       if (!page)
-               return NULL;
-       ret = page_address(page);
-       if (force_dma_unencrypted()) {
-               set_memory_decrypted((unsigned long)ret, 1 << page_order);
-               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-       } else {
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       }
-       memset(ret, 0, size);
-       return ret;
-}
-
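-/*
- * Worked example of the zone fallback above (illustrative): for a device
- * with coherent_dma_mask == DMA_BIT_MASK(30), the first attempt uses
- * GFP_DMA32; if the returned page still fails dma_coherent_ok() (e.g. it
- * sits above 1GB), GFP_DMA32 is swapped for GFP_DMA and the allocation
- * is retried from ZONE_DMA.
- */
-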
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_addr, unsigned long attrs)
-{
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned int page_order = get_order(size);
-
-       if (force_dma_unencrypted())
-               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
-       if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-               free_pages((unsigned long)cpu_addr, page_order);
-}
-
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
-       if (!check_addr(dev, dma_addr, size, __func__))
-               return DIRECT_MAPPING_ERROR;
-       return dma_addr;
-}
-
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-               enum dma_data_direction dir, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-
-               sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
-               if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
-                       return 0;
-               sg_dma_len(sg) = sg->length;
-       }
-
-       return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-               return 0;
-#else
-       /*
-        * Because 32-bit DMA masks are so common we expect every architecture
-        * to be able to satisfy them - either by not supporting more physical
-        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
-        * architecture needs to use an IOMMU instead of the direct mapping.
-        */
-       if (mask < DMA_BIT_MASK(32))
-               return 0;
-#endif
-       /*
-        * Various PCI/PCIe bridges have broken support for > 32bit DMA even
-        * if the device itself might support it.
-        */
-       if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
-               return 0;
-       return 1;
-}
-
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
-       .alloc                  = dma_direct_alloc,
-       .free                   = dma_direct_free,
-       .map_page               = dma_direct_map_page,
-       .map_sg                 = dma_direct_map_sg,
-       .dma_supported          = dma_direct_supported,
-       .mapping_error          = dma_direct_mapping_error,
-};
-EXPORT_SYMBOL(dma_direct_ops);
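-
-/*
- * Illustrative wiring (architecture code, not part of this file): an
- * architecture with coherent DMA and no IOMMU can hand these ops out
- * from its get_arch_dma_ops() hook, e.g.
- *
- *     static inline const struct dma_map_ops *
- *     get_arch_dma_ops(struct bus_type *bus)
- *     {
- *             return &dma_direct_ops;
- *     }
- */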
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
deleted file mode 100644 (file)
index 79e9a75..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t addr;
-
-       addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-       if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-                               size, dir);
-       return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-       if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-       return nents;
-}
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-       .alloc                  = arch_dma_alloc,
-       .free                   = arch_dma_free,
-       .mmap                   = arch_dma_mmap,
-       .sync_single_for_device = dma_noncoherent_sync_single_for_device,
-       .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
-       .map_page               = dma_noncoherent_map_page,
-       .map_sg                 = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-       .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
-       .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
-       .unmap_page             = dma_noncoherent_unmap_page,
-       .unmap_sg               = dma_noncoherent_unmap_sg,
-#endif
-       .dma_supported          = dma_direct_supported,
-       .mapping_error          = dma_direct_mapping_error,
-       .cache_sync             = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);
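-
-/*
- * Usage note (illustrative): an architecture opts in by selecting the
- * matching Kconfig symbols and providing the arch_* hooks used above,
- * roughly:
- *
- *     select DMA_NONCOHERENT_OPS
- *     select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- *     select ARCH_HAS_SYNC_DMA_FOR_CPU        # if CPU-side syncs needed
- *
- * plus implementations of arch_dma_alloc(), arch_dma_free() and the
- * arch_sync_dma_for_*() helpers.
- */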
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
deleted file mode 100644 (file)
index 8e61a02..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *     lib/dma-virt.c
- *
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t gfp,
-                           unsigned long attrs)
-{
-       void *ret;
-
-       ret = (void *)__get_free_pages(gfp, get_order(size));
-       if (ret)
-               *dma_handle = (uintptr_t)ret;
-       return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
-                         void *cpu_addr, dma_addr_t dma_addr,
-                         unsigned long attrs)
-{
-       free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
-                                   unsigned long offset, size_t size,
-                                   enum dma_data_direction dir,
-                                   unsigned long attrs)
-{
-       return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
-                          int nents, enum dma_data_direction dir,
-                          unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-               sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
-               sg_dma_len(sg) = sg->length;
-       }
-
-       return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
-       .alloc                  = dma_virt_alloc,
-       .free                   = dma_virt_free,
-       .map_page               = dma_virt_map_page,
-       .map_sg                 = dma_virt_map_sg,
-};
-EXPORT_SYMBOL(dma_virt_ops);
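-
-/*
- * Usage note (illustrative): these ops suit devices that are pure
- * software constructs (e.g. soft-RDMA drivers), where a "DMA address"
- * is simply the kernel virtual address of the buffer, so no IOMMU or
- * cache maintenance is involved.
- */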
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
deleted file mode 100644 (file)
index 04b68d9..0000000
+++ /dev/null
@@ -1,1087 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is a fallback for platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
- * Copyright (C) 2000, 2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 03/05/07 davidm     Switch from PCI-DMA to generic device DMA API.
- * 00/12/13 davidm     Rename to swiotlb.c and add mark_clean() to avoid
- *                     unnecessary i-cache flushing.
- * 04/07/.. ak         Better overflow handling. Assorted fixes.
- * 05/09/10 linville   Add support for syncing ranges, support syncing for
- *                     DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
- * 08/12/11 beckyb     Add highmem support
- */
-
-#include <linux/cache.h>
-#include <linux/dma-direct.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/swiotlb.h>
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
-#include <linux/set_memory.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/iommu-helper.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/swiotlb.h>
-
-#define OFFSET(val, align) \
-       ((unsigned long)((val) & ((align) - 1)))
-
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-
-/*
- * Minimum IO TLB size to bother booting with.  Systems with mainly
- * 64bit capable cards will only lightly use the swiotlb.  If we can't
- * allocate a contiguous 1MB, we're probably in trouble anyway.
- */
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-
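-/*
- * Illustrative arithmetic: with IO_TLB_SHIFT == 11 (2KB slabs) the
- * minimum above works out to (1 << 20) >> 11 == 512 slabs, i.e. 1MB.
- */
-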
-enum swiotlb_force swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static phys_addr_t io_tlb_start, io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
-
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * Max segment that we can provide which (if pages are contiguous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-unsigned int max_segment;
-
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-static phys_addr_t *io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int late_alloc;
-
-static int __init
-setup_io_tlb_npages(char *str)
-{
-       if (isdigit(*str)) {
-               io_tlb_nslabs = simple_strtoul(str, &str, 0);
-               /* avoid tail segment of size < IO_TLB_SEGSIZE */
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-       if (*str == ',')
-               ++str;
-       if (!strcmp(str, "force")) {
-               swiotlb_force = SWIOTLB_FORCE;
-       } else if (!strcmp(str, "noforce")) {
-               swiotlb_force = SWIOTLB_NO_FORCE;
-               io_tlb_nslabs = 1;
-       }
-
-       return 0;
-}
-early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
-
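-/*
- * Illustrative boot-line usage: "swiotlb=32768" reserves 32768 slabs
- * (64MB at 2KB per slab), while "swiotlb=32768,force" additionally
- * bounces every DMA mapping through the IO TLB, even for devices that
- * could reach the memory directly.
- */
-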
-unsigned long swiotlb_nr_tbl(void)
-{
-       return io_tlb_nslabs;
-}
-EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-
-unsigned int swiotlb_max_segment(void)
-{
-       return max_segment;
-}
-EXPORT_SYMBOL_GPL(swiotlb_max_segment);
-
-void swiotlb_set_max_segment(unsigned int val)
-{
-       if (swiotlb_force == SWIOTLB_FORCE)
-               max_segment = 1;
-       else
-               max_segment = rounddown(val, PAGE_SIZE);
-}
-
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-unsigned long swiotlb_size_or_default(void)
-{
-       unsigned long size;
-
-       size = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       return size ? size : (IO_TLB_DEFAULT_SIZE);
-}
-
-static bool no_iotlb_memory;
-
-void swiotlb_print_info(void)
-{
-       unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-       unsigned char *vstart, *vend;
-
-       if (no_iotlb_memory) {
-               pr_warn("software IO TLB: No low mem\n");
-               return;
-       }
-
-       vstart = phys_to_virt(io_tlb_start);
-       vend = phys_to_virt(io_tlb_end);
-
-       printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
-              (unsigned long long)io_tlb_start,
-              (unsigned long long)io_tlb_end,
-              bytes >> 20, vstart, vend - 1);
-}
-
-/*
- * Early SWIOTLB allocation may be too early to allow an architecture to
- * perform the desired operations.  This function allows the architecture to
- * call SWIOTLB when the operations are possible.  It needs to be called
- * before the SWIOTLB memory is used.
- */
-void __init swiotlb_update_mem_attributes(void)
-{
-       void *vaddr;
-       unsigned long bytes;
-
-       if (no_iotlb_memory || late_alloc)
-               return;
-
-       vaddr = phys_to_virt(io_tlb_start);
-       bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-       memset(vaddr, 0, bytes);
-
-       vaddr = phys_to_virt(io_tlb_overflow_buffer);
-       bytes = PAGE_ALIGN(io_tlb_overflow);
-       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-       memset(vaddr, 0, bytes);
-}
-
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
-{
-       void *v_overflow_buffer;
-       unsigned long i, bytes;
-
-       bytes = nslabs << IO_TLB_SHIFT;
-
-       io_tlb_nslabs = nslabs;
-       io_tlb_start = __pa(tlb);
-       io_tlb_end = io_tlb_start + bytes;
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-                                               PAGE_ALIGN(io_tlb_overflow),
-                                               PAGE_SIZE);
-       if (!v_overflow_buffer)
-               return -ENOMEM;
-
-       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
-       io_tlb_list = memblock_virt_alloc(
-                               PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
-                               PAGE_SIZE);
-       io_tlb_orig_addr = memblock_virt_alloc(
-                               PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
-                               PAGE_SIZE);
-       for (i = 0; i < io_tlb_nslabs; i++) {
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-       }
-       io_tlb_index = 0;
-
-       if (verbose)
-               swiotlb_print_info();
-
-       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-       return 0;
-}
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void  __init
-swiotlb_init(int verbose)
-{
-       size_t default_size = IO_TLB_DEFAULT_SIZE;
-       unsigned char *vstart;
-       unsigned long bytes;
-
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       /* Get IO TLB memory from the low pages */
-       vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
-       if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
-               return;
-
-       if (io_tlb_start)
-               memblock_free_early(io_tlb_start,
-                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-       pr_warn("Cannot allocate SWIOTLB buffer");
-       no_iotlb_memory = true;
-}
-
-/*
- * Systems with larger DMA zones (those that don't support ISA) can
- * initialize the swiotlb later using the slab allocator if needed.
- * This should be just like above, but with some error catching.
- */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
-{
-       unsigned long bytes, req_nslabs = io_tlb_nslabs;
-       unsigned char *vstart = NULL;
-       unsigned int order;
-       int rc = 0;
-
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-
-       /*
-        * Get IO TLB memory from the low pages
-        */
-       order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
-       io_tlb_nslabs = SLABS_PER_PAGE << order;
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-                                                 order);
-               if (vstart)
-                       break;
-               order--;
-       }
-
-       if (!vstart) {
-               io_tlb_nslabs = req_nslabs;
-               return -ENOMEM;
-       }
-       if (order != get_order(bytes)) {
-               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
-                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
-               io_tlb_nslabs = SLABS_PER_PAGE << order;
-       }
-       rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
-       if (rc)
-               free_pages((unsigned long)vstart, order);
-
-       return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-       unsigned long i, bytes;
-       unsigned char *v_overflow_buffer;
-
-       bytes = nslabs << IO_TLB_SHIFT;
-
-       io_tlb_nslabs = nslabs;
-       io_tlb_start = virt_to_phys(tlb);
-       io_tlb_end = io_tlb_start + bytes;
-
-       set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-       memset(tlb, 0, bytes);
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                                    get_order(io_tlb_overflow));
-       if (!v_overflow_buffer)
-               goto cleanup2;
-
-       set_memory_decrypted((unsigned long)v_overflow_buffer,
-                       io_tlb_overflow >> PAGE_SHIFT);
-       memset(v_overflow_buffer, 0, io_tlb_overflow);
-       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
-       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
-                                     get_order(io_tlb_nslabs * sizeof(int)));
-       if (!io_tlb_list)
-               goto cleanup3;
-
-       io_tlb_orig_addr = (phys_addr_t *)
-               __get_free_pages(GFP_KERNEL,
-                                get_order(io_tlb_nslabs *
-                                          sizeof(phys_addr_t)));
-       if (!io_tlb_orig_addr)
-               goto cleanup4;
-
-       for (i = 0; i < io_tlb_nslabs; i++) {
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-       }
-       io_tlb_index = 0;
-
-       swiotlb_print_info();
-
-       late_alloc = 1;
-
-       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-
-       return 0;
-
-cleanup4:
-       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-                                                        sizeof(int)));
-       io_tlb_list = NULL;
-cleanup3:
-       free_pages((unsigned long)v_overflow_buffer,
-                  get_order(io_tlb_overflow));
-       io_tlb_overflow_buffer = 0;
-cleanup2:
-       io_tlb_end = 0;
-       io_tlb_start = 0;
-       io_tlb_nslabs = 0;
-       max_segment = 0;
-       return -ENOMEM;
-}
-
-void __init swiotlb_exit(void)
-{
-       if (!io_tlb_orig_addr)
-               return;
-
-       if (late_alloc) {
-               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-                          get_order(io_tlb_overflow));
-               free_pages((unsigned long)io_tlb_orig_addr,
-                          get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-               free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-                                                                sizeof(int)));
-               free_pages((unsigned long)phys_to_virt(io_tlb_start),
-                          get_order(io_tlb_nslabs << IO_TLB_SHIFT));
-       } else {
-               memblock_free_late(io_tlb_overflow_buffer,
-                                  PAGE_ALIGN(io_tlb_overflow));
-               memblock_free_late(__pa(io_tlb_orig_addr),
-                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-               memblock_free_late(__pa(io_tlb_list),
-                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-               memblock_free_late(io_tlb_start,
-                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-       }
-       io_tlb_nslabs = 0;
-       max_segment = 0;
-}
-
-int is_swiotlb_buffer(phys_addr_t paddr)
-{
-       return paddr >= io_tlb_start && paddr < io_tlb_end;
-}
-
-/*
- * Bounce: copy the swiotlb buffer back to the original dma location
- */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
-                          size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(orig_addr);
-       unsigned char *vaddr = phys_to_virt(tlb_addr);
-
-       if (PageHighMem(pfn_to_page(pfn))) {
-               /* The buffer does not have a mapping.  Map it in and copy */
-               unsigned int offset = orig_addr & ~PAGE_MASK;
-               char *buffer;
-               unsigned int sz = 0;
-               unsigned long flags;
-
-               while (size) {
-                       sz = min_t(size_t, PAGE_SIZE - offset, size);
-
-                       local_irq_save(flags);
-                       buffer = kmap_atomic(pfn_to_page(pfn));
-                       if (dir == DMA_TO_DEVICE)
-                               memcpy(vaddr, buffer + offset, sz);
-                       else
-                               memcpy(buffer + offset, vaddr, sz);
-                       kunmap_atomic(buffer);
-                       local_irq_restore(flags);
-
-                       size -= sz;
-                       pfn++;
-                       vaddr += sz;
-                       offset = 0;
-               }
-       } else if (dir == DMA_TO_DEVICE) {
-               memcpy(vaddr, phys_to_virt(orig_addr), size);
-       } else {
-               memcpy(phys_to_virt(orig_addr), vaddr, size);
-       }
-}
-
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-                                  dma_addr_t tbl_dma_addr,
-                                  phys_addr_t orig_addr, size_t size,
-                                  enum dma_data_direction dir,
-                                  unsigned long attrs)
-{
-       unsigned long flags;
-       phys_addr_t tlb_addr;
-       unsigned int nslots, stride, index, wrap;
-       int i;
-       unsigned long mask;
-       unsigned long offset_slots;
-       unsigned long max_slots;
-
-       if (no_iotlb_memory)
-               panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
-       if (mem_encrypt_active())
-               pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-                            sme_active() ? "SME" : "SEV");
-
-       mask = dma_get_seg_boundary(hwdev);
-
-       tbl_dma_addr &= mask;
-
-       offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-
-       /*
-        * Carefully handle integer overflow which can occur when mask == ~0UL.
-        */
-       max_slots = mask + 1
-                   ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
-                   : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
-
-       /*
-        * For mappings greater than or equal to a page, we limit the stride
-        * (and hence alignment) to a page size.
-        */
-       nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       if (size >= PAGE_SIZE)
-               stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-       else
-               stride = 1;
-
-       BUG_ON(!nslots);
-
-       /*
-        * Find suitable number of IO TLB entries size that will fit this
-        * request and allocate a buffer from that IO TLB pool.
-        */
-       spin_lock_irqsave(&io_tlb_lock, flags);
-       index = ALIGN(io_tlb_index, stride);
-       if (index >= io_tlb_nslabs)
-               index = 0;
-       wrap = index;
-
-       do {
-               while (iommu_is_span_boundary(index, nslots, offset_slots,
-                                             max_slots)) {
-                       index += stride;
-                       if (index >= io_tlb_nslabs)
-                               index = 0;
-                       if (index == wrap)
-                               goto not_found;
-               }
-
-               /*
-                * If we find a slot that indicates we have 'nslots' number of
-                * contiguous buffers, we allocate the buffers from that slot
-                * and mark the entries as '0' indicating unavailable.
-                */
-               if (io_tlb_list[index] >= nslots) {
-                       int count = 0;
-
-                       for (i = index; i < (int) (index + nslots); i++)
-                               io_tlb_list[i] = 0;
-                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
-                               io_tlb_list[i] = ++count;
-                       tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-                       /*
-                        * Update the indices to avoid searching in the next
-                        * round.
-                        */
-                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
-                                       ? (index + nslots) : 0);
-
-                       goto found;
-               }
-               index += stride;
-               if (index >= io_tlb_nslabs)
-                       index = 0;
-       } while (index != wrap);
-
-not_found:
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
-               dev_warn(hwdev, "swiotlb buffer is full (sz: %zu bytes)\n", size);
-       return SWIOTLB_MAP_ERROR;
-found:
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-
-       /*
-        * Save away the mapping from the original address to the DMA address.
-        * This is needed when we sync the memory.  Then we sync the buffer if
-        * needed.
-        */
-       for (i = 0; i < nslots; i++)
-               io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
-
-       return tlb_addr;
-}
-
-/*
- * Allocates a bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-          enum dma_data_direction dir, unsigned long attrs)
-{
-       dma_addr_t start_dma_addr;
-
-       if (swiotlb_force == SWIOTLB_NO_FORCE) {
-               dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-                                    &phys);
-               return SWIOTLB_MAP_ERROR;
-       }
-
-       start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-       return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-                                     dir, attrs);
-}
-
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-                             size_t size, enum dma_data_direction dir,
-                             unsigned long attrs)
-{
-       unsigned long flags;
-       int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-       /*
-        * First, sync the memory before unmapping the entry
-        */
-       if (orig_addr != INVALID_PHYS_ADDR &&
-           !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
-
-       /*
-        * Return the buffer to the free list by setting the corresponding
-        * entries to indicate the number of contiguous entries available.
-        * While returning the entries to the free list, we merge the entries
-        * with slots below and above the pool being returned.
-        */
-       spin_lock_irqsave(&io_tlb_lock, flags);
-       {
-               count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-                        io_tlb_list[index + nslots] : 0);
-               /*
-                * Step 1: return the slots to the free list, merging the
-                * slots with the succeeding slots
-                */
-               for (i = index + nslots - 1; i >= index; i--) {
-                       io_tlb_list[i] = ++count;
-                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-               }
-               /*
-                * Step 2: merge the returned slots with the preceding slots,
-                * if available (non zero)
-                */
-               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
-                       io_tlb_list[i] = ++count;
-       }
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
-
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
-                            size_t size, enum dma_data_direction dir,
-                            enum dma_sync_target target)
-{
-       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-       if (orig_addr == INVALID_PHYS_ADDR)
-               return;
-       orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
-
-       switch (target) {
-       case SYNC_FOR_CPU:
-               if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_FROM_DEVICE);
-               else
-                       BUG_ON(dir != DMA_TO_DEVICE);
-               break;
-       case SYNC_FOR_DEVICE:
-               if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_TO_DEVICE);
-               else
-                       BUG_ON(dir != DMA_FROM_DEVICE);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-               size_t size)
-{
-       u64 mask = DMA_BIT_MASK(32);
-
-       if (dev && dev->coherent_dma_mask)
-               mask = dev->coherent_dma_mask;
-       return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               unsigned long attrs)
-{
-       phys_addr_t phys_addr;
-
-       if (swiotlb_force == SWIOTLB_NO_FORCE)
-               goto out_warn;
-
-       phys_addr = swiotlb_tbl_map_single(dev,
-                       __phys_to_dma(dev, io_tlb_start),
-                       0, size, DMA_FROM_DEVICE, attrs);
-       if (phys_addr == SWIOTLB_MAP_ERROR)
-               goto out_warn;
-
-       *dma_handle = __phys_to_dma(dev, phys_addr);
-       if (!dma_coherent_ok(dev, *dma_handle, size))
-               goto out_unmap;
-
-       memset(phys_to_virt(phys_addr), 0, size);
-       return phys_to_virt(phys_addr);
-
-out_unmap:
-       dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-               (unsigned long long)dev->coherent_dma_mask,
-               (unsigned long long)*dma_handle);
-
-       /*
-        * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-        * DMA_ATTR_SKIP_CPU_SYNC is optional.
-        */
-       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                       DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-               dev_warn(dev,
-                       "swiotlb: coherent allocation failed, size=%zu\n",
-                       size);
-               dump_stack();
-       }
-       return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-               dma_addr_t dma_addr)
-{
-       phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-       WARN_ON_ONCE(irqs_disabled());
-
-       if (!is_swiotlb_buffer(phys_addr))
-               return false;
-
-       /*
-        * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-        * DMA_ATTR_SKIP_CPU_SYNC is optional.
-        */
-       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                                DMA_ATTR_SKIP_CPU_SYNC);
-       return true;
-}
-
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
-            int do_panic)
-{
-       if (swiotlb_force == SWIOTLB_NO_FORCE)
-               return;
-
-       /*
-        * Ran out of IOMMU space for this operation. This is very bad.
-        * Unfortunately the drivers cannot handle this operation properly
-        * unless they check for dma_mapping_error (most don't).
-        * When the mapping is small enough, return a static buffer to limit
-        * the damage, or panic when the transfer is too big.
-        */
-       dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
-                           size);
-
-       if (size <= io_tlb_overflow || !do_panic)
-               return;
-
-       if (dir == DMA_BIDIRECTIONAL)
-               panic("DMA: Random memory could be DMA accessed\n");
-       if (dir == DMA_FROM_DEVICE)
-               panic("DMA: Random memory could be DMA written\n");
-       if (dir == DMA_TO_DEVICE)
-               panic("DMA: Random memory could be DMA read\n");
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode.  The
- * physical address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_page or swiotlb_sync_single_for_cpu is performed.
- */
-dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-                           unsigned long offset, size_t size,
-                           enum dma_data_direction dir,
-                           unsigned long attrs)
-{
-       phys_addr_t map, phys = page_to_phys(page) + offset;
-       dma_addr_t dev_addr = phys_to_dma(dev, phys);
-
-       BUG_ON(dir == DMA_NONE);
-       /*
-        * If the address happens to be in the device's DMA window,
-        * we can safely return the device addr and not worry about bounce
-        * buffering it.
-        */
-       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-               return dev_addr;
-
-       trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-       /* Oh well, have to allocate and map a bounce buffer. */
-       map = map_single(dev, phys, size, dir, attrs);
-       if (map == SWIOTLB_MAP_ERROR) {
-               swiotlb_full(dev, size, dir, 1);
-               return __phys_to_dma(dev, io_tlb_overflow_buffer);
-       }
-
-       dev_addr = __phys_to_dma(dev, map);
-
-       /* Ensure that the address returned is DMA'ble */
-       if (dma_capable(dev, dev_addr, size))
-               return dev_addr;
-
-       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-       swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-       return __phys_to_dma(dev, io_tlb_overflow_buffer);
-}
-
-/*
- * Unmap a single streaming mode DMA translation.  The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_page call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                        size_t size, enum dma_data_direction dir,
-                        unsigned long attrs)
-{
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-
-       if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
-               return;
-       }
-
-       if (dir != DMA_FROM_DEVICE)
-               return;
-
-       /*
-        * phys_to_virt doesn't work with highmem pages, but we could
-        * call dma_mark_clean() with a highmem page here. However, we
-        * are fine since dma_mark_clean() is a no-op on POWERPC. We can
-        * make dma_mark_clean() take a physical address if necessary.
-        */
-       dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-                       size_t size, enum dma_data_direction dir,
-                       unsigned long attrs)
-{
-       unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to tear down the DMA mapping, you must
- * call this function before doing so.  Before you next give the DMA
- * address back to the card, you must first call
- * swiotlb_sync_single_for_device, and then the device again owns the buffer.
- */
-static void
-swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                   size_t size, enum dma_data_direction dir,
-                   enum dma_sync_target target)
-{
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-
-       if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-               return;
-       }
-
-       if (dir != DMA_FROM_DEVICE)
-               return;
-
-       dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-                           size_t size, enum dma_data_direction dir)
-{
-       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-                              size_t size, enum dma_data_direction dir)
-{
-       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_page
- * interface.  Here the scatter-gather list elements are each tagged with the
- * appropriate dma address and length.  They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for swiotlb_map_page are the
- * same here.
- */
-int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                    enum dma_data_direction dir, unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(dir == DMA_NONE);
-
-       for_each_sg(sgl, sg, nelems, i) {
-               phys_addr_t paddr = sg_phys(sg);
-               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
-               if (swiotlb_force == SWIOTLB_FORCE ||
-                   !dma_capable(hwdev, dev_addr, sg->length)) {
-                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
-                                                    sg->length, dir, attrs);
-                       if (map == SWIOTLB_MAP_ERROR) {
-                               /*
-                                * Don't panic here; we expect map_sg users
-                                * to do proper error handling.
-                                */
-                               swiotlb_full(hwdev, sg->length, dir, 0);
-                               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-                               swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
-                                                      attrs);
-                               sg_dma_len(sgl) = 0;
-                               return 0;
-                       }
-                       sg->dma_address = __phys_to_dma(hwdev, map);
-               } else
-                       sg->dma_address = dev_addr;
-               sg_dma_len(sg) = sg->length;
-       }
-       return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-                      int nelems, enum dma_data_direction dir,
-                      unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(dir == DMA_NONE);
-
-       for_each_sg(sgl, sg, nelems, i)
-               unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
-                            attrs);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-               int nelems, enum dma_data_direction dir,
-               enum dma_sync_target target)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nelems, i)
-               swiotlb_sync_single(hwdev, sg->dma_address,
-                                   sg_dma_len(sg), dir, target);
-}
-
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-                       int nelems, enum dma_data_direction dir)
-{
-       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-                          int nelems, enum dma_data_direction dir)
-{
-       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
-}
-
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-       return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.
- */
-int
-swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
-       return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
-}
-
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       void *vaddr;
-
-       /* temporary workaround: */
-       if (gfp & __GFP_NOWARN)
-               attrs |= DMA_ATTR_NO_WARN;
-
-       /*
-        * Don't print a warning when the first allocation attempt fails.
-        * swiotlb_alloc_buffer() will print a warning when the DMA memory
-        * allocation ultimately fails.
-        */
-       gfp |= __GFP_NOWARN;
-
-       vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-       if (!vaddr)
-               vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-       return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_addr, unsigned long attrs)
-{
-       if (!swiotlb_free_buffer(dev, size, dma_addr))
-               dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
-       .mapping_error          = swiotlb_dma_mapping_error,
-       .alloc                  = swiotlb_alloc,
-       .free                   = swiotlb_free,
-       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
-       .sync_single_for_device = swiotlb_sync_single_for_device,
-       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
-       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
-       .map_sg                 = swiotlb_map_sg_attrs,
-       .unmap_sg               = swiotlb_unmap_sg_attrs,
-       .map_page               = swiotlb_map_page,
-       .unmap_page             = swiotlb_unmap_page,
-       .dma_supported          = dma_direct_supported,
-};
index cc16d70..03d48d8 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
-#include <linux/bootmem.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
index 2520bc1..078999a 100755 (executable)
@@ -21,7 +21,7 @@ GetOptions(
 );
 
 if ($help != 0) {
-    print "$scriptname [--help] [--fix-rst]\n";
+    print "$scriptname [--help] [--fix]\n";
     exit -1;
 }
 
@@ -38,16 +38,31 @@ while (<IN>) {
        my $f = $1;
        my $ln = $2;
 
-       # Makefiles contain nasty expressions to parse docs
-       next if ($f =~ m/Makefile/);
+       # Makefiles and scripts contain nasty expressions to parse docs
+       next if ($f =~ m/Makefile/ || $f =~ m/\.sh$/);
+
        # Skip this script
        next if ($f eq $scriptname);
 
-       if ($ln =~ m,\b(\S*)(Documentation/[A-Za-z0-9\_\.\,\~/\*+-]*),) {
+       if ($ln =~ m,\b(\S*)(Documentation/[A-Za-z0-9\_\.\,\~/\*\[\]\?+-]*)(.*),) {
                my $prefix = $1;
                my $ref = $2;
                my $base = $2;
+               my $extra = $3;
+
+               # some file references are like:
+               # /usr/src/linux/Documentation/DMA-{API,mapping}.txt
+               # For now, ignore them
+               next if ($extra =~ m/^{/);
+
+               # Remove footnotes at the end like:
+               # Documentation/devicetree/dt-object-internal.txt[1]
+               $ref =~ s/(txt|rst)\[\d+]$/$1/;
+
+               # Remove ending ']' without any '['
+               $ref =~ s/\].*// if (!($ref =~ m/\[/));
 
+               # Remove punctuation marks at the end
                $ref =~ s/[\,\.]+$//;
 
                my $fulref = "$prefix$ref";
@@ -63,8 +78,15 @@ while (<IN>) {
                # Check if exists, evaluating wildcards
                next if (grep -e, glob("$ref $fulref"));
 
+               # Accept relative Documentation patches for tools/
+               if ($f =~ m/tools/) {
+                       my $path = $f;
+                       $path =~ s,(.*)/.*,$1,;
+                       next if (grep -e, glob("$path/$ref $path/$fulref"));
+               }
+
                if ($fix) {
-                       if (!($ref =~ m/(devicetree|scripts|Kconfig|Kbuild)/)) {
+                       if (!($ref =~ m/(scripts|Kconfig|Kbuild)/)) {
                                $broken_ref{$ref}++;
                        }
                } else {
@@ -84,10 +106,19 @@ foreach my $ref (keys %broken_ref) {
        # get just the basename
        $new =~ s,.*/,,;
 
-       # Seek for the same name on another place, as it may have been moved
        my $f="";
 
-       $f = qx(find . -iname $new) if ($new);
+       # usual reason for breakage: DT file moved around
+       if ($ref =~ /devicetree/) {
+               my $search = $new;
+               $search =~ s,^.*/,,;
+               $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+               if (!$f) {
+                       # Manufacturer name may have changed
+                       $search =~ s/^.*,//;
+                       $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+               }
+       }
 
        # usual reason for breakage: file renamed to .rst
        if (!$f) {
@@ -95,6 +126,17 @@ foreach my $ref (keys %broken_ref) {
                $f=qx(find . -iname $new) if ($new);
        }
 
+       # usual reason for breakage: dash/underscore spelling differences
+       if (!$f) {
+               $new =~ s/[-_]/[-_]/g;
+               $f=qx(find . -iname $new) if ($new);
+       }
+
+       # Wild guess: search for the same name in another place
+       if (!$f) {
+               $f = qx(find . -iname $new) if ($new);
+       }
+
        my @find = split /\s+/, $f;
 
        if (!$f) {
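
The dash/underscore fallback added above leans on find's -iname glob syntax:
rewriting every '-' or '_' as the bracket expression '[-_]' lets a single
pattern match either spelling. A small illustration (hypothetical file name,
not part of the patch):

	my $new = "rotary-encoder.txt";
	$new =~ s/[-_]/[-_]/g;          # now "rotary[-_]encoder.txt"
	# find . -iname "rotary[-_]encoder.txt" matches both
	# rotary-encoder.txt and rotary_encoder.txt
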
index c65b39b..cd97929 100644 (file)
@@ -509,7 +509,7 @@ static inline int may_allow_all(struct dev_cgroup *parent)
  * This is one of the three key functions for hierarchy implementation.
  * This function is responsible for re-evaluating all the cgroup's active
  * exceptions due to a parent's exception change.
- * Refer to Documentation/cgroups/devices.txt for more details.
+ * Refer to Documentation/cgroup-v1/devices.txt for more details.
  */
 static void revalidate_active_exceptions(struct dev_cgroup *devcg)
 {
index 9a46dc2..2b5ee5f 100644 (file)
@@ -4728,7 +4728,7 @@ err_af:
 }
 
 /* This supports connect(2) and SCTP connect services such as sctp_connectx(3)
- * and sctp_sendmsg(3) as described in Documentation/security/LSM-sctp.txt
+ * and sctp_sendmsg(3) as described in Documentation/security/LSM-sctp.rst
  */
 static int selinux_socket_connect_helper(struct socket *sock,
                                         struct sockaddr *address, int addrlen)
index 6e937a8..63b3ef9 100644 (file)
@@ -48,7 +48,7 @@ config SND_MIXER_OSS
        depends on SND_OSSEMUL
        help
          To enable OSS mixer API emulation (/dev/mixer*), say Y here
-         and read <file:Documentation/sound/alsa/OSS-Emulation.txt>.
+         and read <file:Documentation/sound/designs/oss-emulation.rst>.
 
          Many programs still use the OSS API, so say Y.
 
@@ -61,7 +61,7 @@ config SND_PCM_OSS
        select SND_PCM
        help
          To enable OSS digital audio (PCM) emulation (/dev/dsp*), say Y
-         here and read <file:Documentation/sound/alsa/OSS-Emulation.txt>.
+         here and read <file:Documentation/sound/designs/oss-emulation.rst>.
 
          Many programs still use the OSS API, so say Y.
 
index 7144cc3..648a12d 100644 (file)
@@ -153,7 +153,7 @@ config SND_SERIAL_U16550
        select SND_RAWMIDI
        help
          To include support for MIDI serial port interfaces, say Y here
-         and read <file:Documentation/sound/alsa/serial-u16550.txt>.
+         and read <file:Documentation/sound/cards/serial-u16550.rst>.
          This driver works with serial UARTs 16550 and better.
 
          This driver accesses the serial port hardware directly, so
@@ -223,7 +223,7 @@ config SND_AC97_POWER_SAVE
          the device frequently.  A value of 10 seconds would be a
          good choice for normal operations.
 
-         See Documentation/sound/alsa/powersave.txt for more details.
+         See Documentation/sound/designs/powersave.rst for more details.
 
 config SND_AC97_POWER_SAVE_DEFAULT
        int "Default time-out for AC97 power-save mode"
index d9f3fdb..4105d9f 100644 (file)
@@ -175,7 +175,7 @@ config SND_BT87X
        help
          If you want to record audio from TV cards based on
          Brooktree Bt878/Bt879 chips, say Y here and read
-         <file:Documentation/sound/alsa/Bt87x.txt>.
+         <file:Documentation/sound/cards/bt87x.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called snd-bt87x.
@@ -210,7 +210,7 @@ config SND_CMIPCI
        help
          If you want to use soundcards based on C-Media CMI8338, CMI8738,
          CMI8768 or CMI8770 chips, say Y here and read
-         <file:Documentation/sound/alsa/CMIPCI.txt>.
+         <file:Documentation/sound/cards/cmipci.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called snd-cmipci.
@@ -472,8 +472,8 @@ config SND_EMU10K1
          Audigy and E-mu APS (partially supported) soundcards.
 
          The confusing multitude of mixer controls is documented in
-         <file:Documentation/sound/alsa/SB-Live-mixer.txt> and
-         <file:Documentation/sound/alsa/Audigy-mixer.txt>.
+         <file:Documentation/sound/cards/sb-live-mixer.rst> and
+         <file:Documentation/sound/cards/audigy-mixer.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called snd-emu10k1.
@@ -735,7 +735,7 @@ config SND_MIXART
        select SND_PCM
        help
          If you want to use Digigram miXart soundcards, say Y here and
-         read <file:Documentation/sound/alsa/MIXART.txt>.
+         read <file:Documentation/sound/cards/mixart.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called snd-mixart.
index db9f15f..c0d7ea0 100644 (file)
@@ -170,7 +170,7 @@ struct prctl_mm_map {
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
  *
- * See Documentation/prctl/no_new_privs.txt for more details.
+ * See Documentation/userspace-api/no_new_privs.rst for more details.
  */
 #define PR_SET_NO_NEW_PRIVS    38
 #define PR_GET_NO_NEW_PRIVS    39
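
As context (not part of this hunk), the flag documented there is a one-way
switch that userspace typically sets via prctl(2); a minimal sketch:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* Once set, execve() can no longer grant new privileges. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1)
			perror("prctl(PR_SET_NO_NEW_PRIVS)");

		/* Reads back 1 after a successful set. */
		printf("no_new_privs = %d\n",
		       prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
		return 0;
	}
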
index 6a12bbf..7aba824 100644 (file)
@@ -201,7 +201,7 @@ static void mem_toupper(char *f, size_t len)
 
 /*
  * Check for "NAME_PATH" environment variable to override fs location (for
- * testing). This matches the recommendation in Documentation/sysfs-rules.txt
+ * testing). This matches the recommendation in Documentation/admin-guide/sysfs-rules.rst
  * for SYSFS_PATH.
  */
 static bool fs__env_override(struct fs *fs)
index 2934775..77e4891 100644 (file)
@@ -61,7 +61,7 @@ check_pos(struct bpf_insn_pos *pos)
 
 /*
  * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
- * Documentation/trace/kprobetrace.txt) to size field of BPF_LDX_MEM
+ * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM
  * instruction (BPF_{B,H,W,DW}).
  */
 static int
index 4f80ad7..f8fcb06 100644 (file)
@@ -105,7 +105,7 @@ override-dev-timeline-functions: true
 #       example: [color=#CC00CC]
 #
 #   arglist: A list of arguments from registers/stack addresses. See URL:
-#            https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt
+#            https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst
 #
 #       example: cpu=%di:s32
 #
@@ -170,7 +170,7 @@ pm_restore_console:
 #       example: [color=#CC00CC]
 #
 #   arglist: A list of arguments from registers/stack addresses. See URL:
-#            https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt
+#            https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst
 #
 #       example: port=+36(%di):s32
 #